7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (aslr)
7031 noexec_user_stack should be a secflag
7032 want a means to forbid mappings around NULL.
--- old/usr/src/uts/i86pc/vm/vm_machdep.c
+++ new/usr/src/uts/i86pc/vm/vm_machdep.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright (c) 2010, Intel Corporation.
26 26 * All rights reserved.
27 27 */
28 28
29 29 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
30 30 /* All Rights Reserved */
31 31
32 32 /*
33 33 * Portions of this source code were derived from Berkeley 4.3 BSD
34 34 * under license from the Regents of the University of California.
35 35 */
36 36
37 37 /*
38 38 * UNIX machine dependent virtual memory support.
39 39 */
40 40
41 41 #include <sys/types.h>
42 42 #include <sys/param.h>
43 43 #include <sys/systm.h>
44 44 #include <sys/user.h>
45 45 #include <sys/proc.h>
46 46 #include <sys/kmem.h>
47 47 #include <sys/vmem.h>
48 48 #include <sys/buf.h>
49 49 #include <sys/cpuvar.h>
50 50 #include <sys/lgrp.h>
51 51 #include <sys/disp.h>
52 52 #include <sys/vm.h>
53 53 #include <sys/mman.h>
54 54 #include <sys/vnode.h>
55 55 #include <sys/cred.h>
56 56 #include <sys/exec.h>
57 57 #include <sys/exechdr.h>
58 58 #include <sys/debug.h>
59 59 #include <sys/vmsystm.h>
60 60 #include <sys/swap.h>
61 61 #include <sys/dumphdr.h>
62 +#include <sys/random.h>
62 63
63 64 #include <vm/hat.h>
64 65 #include <vm/as.h>
65 66 #include <vm/seg.h>
66 67 #include <vm/seg_kp.h>
67 68 #include <vm/seg_vn.h>
68 69 #include <vm/page.h>
69 70 #include <vm/seg_kmem.h>
70 71 #include <vm/seg_kpm.h>
71 72 #include <vm/vm_dep.h>
72 73
73 74 #include <sys/cpu.h>
74 75 #include <sys/vm_machparam.h>
75 76 #include <sys/memlist.h>
76 77 #include <sys/bootconf.h> /* XXX the memlist stuff belongs in memlist_plat.h */
77 78 #include <vm/hat_i86.h>
78 79 #include <sys/x86_archext.h>
79 80 #include <sys/elf_386.h>
80 81 #include <sys/cmn_err.h>
81 82 #include <sys/archsystm.h>
82 83 #include <sys/machsystm.h>
84 +#include <sys/secflags.h>
83 85
84 86 #include <sys/vtrace.h>
85 87 #include <sys/ddidmareq.h>
86 88 #include <sys/promif.h>
87 89 #include <sys/memnode.h>
88 90 #include <sys/stack.h>
89 91 #include <util/qsort.h>
90 92 #include <sys/taskq.h>
91 93
92 94 #ifdef __xpv
93 95
94 96 #include <sys/hypervisor.h>
95 97 #include <sys/xen_mmu.h>
96 98 #include <sys/balloon_impl.h>
97 99
98 100 /*
99 101 * domain 0 pages usable for DMA are kept pre-allocated in
100 102 * distinct lists, ordered by increasing mfn.
101 103 */
102 104 static kmutex_t io_pool_lock;
103 105 static kmutex_t contig_list_lock;
104 106 static page_t *io_pool_4g; /* pool for 32 bit dma limited devices */
105 107 static page_t *io_pool_16m; /* pool for 24 bit dma limited legacy devices */
106 108 static long io_pool_cnt;
107 109 static long io_pool_cnt_max = 0;
108 110 #define DEFAULT_IO_POOL_MIN 128
109 111 static long io_pool_cnt_min = DEFAULT_IO_POOL_MIN;
110 112 static long io_pool_cnt_lowater = 0;
111 113 static long io_pool_shrink_attempts; /* how many times did we try to shrink */
112 114 static long io_pool_shrinks; /* how many times did we really shrink */
113 115 static long io_pool_grows; /* how many times did we grow */
114 116 static mfn_t start_mfn = 1;
115 117 static caddr_t io_pool_kva; /* use to alloc pages when needed */
116 118
117 119 static int create_contig_pfnlist(uint_t);
118 120
119 121 /*
120 122 * percentage of phys mem to hold in the i/o pool
121 123 */
122 124 #define DEFAULT_IO_POOL_PCT 2
123 125 static long io_pool_physmem_pct = DEFAULT_IO_POOL_PCT;
124 126 static void page_io_pool_sub(page_t **, page_t *, page_t *);
125 127 int ioalloc_dbg = 0;
126 128
127 129 #endif /* __xpv */
128 130
129 131 uint_t vac_colors = 1;
130 132
131 133 int largepagesupport = 0;
132 134 extern uint_t page_create_new;
133 135 extern uint_t page_create_exists;
134 136 extern uint_t page_create_putbacks;
135 137 /*
136 138 * Allow users to disable the kernel's use of SSE.
137 139 */
138 140 extern int use_sse_pagecopy, use_sse_pagezero;
139 141
140 142 /*
141 143 * combined memory ranges from mnode and memranges[] to manage single
142 144 * mnode/mtype dimension in the page lists.
143 145 */
144 146 typedef struct {
145 147 pfn_t mnr_pfnlo;
146 148 pfn_t mnr_pfnhi;
147 149 int mnr_mnode;
148 150 int mnr_memrange; /* index into memranges[] */
149 151 int mnr_next; /* next lower PA mnoderange */
150 152 int mnr_exists;
151 153 /* maintain page list stats */
152 154 pgcnt_t mnr_mt_clpgcnt; /* cache list cnt */
153 155 pgcnt_t mnr_mt_flpgcnt[MMU_PAGE_SIZES]; /* free list cnt per szc */
154 156 pgcnt_t mnr_mt_totcnt; /* sum of cache and free lists */
155 157 #ifdef DEBUG
156 158 struct mnr_mts { /* mnode/mtype szc stats */
157 159 pgcnt_t mnr_mts_pgcnt;
158 160 int mnr_mts_colors;
159 161 pgcnt_t *mnr_mtsc_pgcnt;
160 162 } *mnr_mts;
161 163 #endif
162 164 } mnoderange_t;
163 165
164 166 #define MEMRANGEHI(mtype) \
165 167 ((mtype > 0) ? memranges[mtype - 1] - 1: physmax)
166 168 #define MEMRANGELO(mtype) (memranges[mtype])
167 169
168 170 #define MTYPE_FREEMEM(mt) (mnoderanges[mt].mnr_mt_totcnt)
169 171
170 172 /*
171 173 * As the PC architecture evolved, memory was clumped into several
172 174 * ranges for various historical I/O devices to do DMA.
173 175 * < 16Meg - ISA bus
174 176 * < 2Gig - ???
175 177 * < 4Gig - PCI bus or drivers that don't understand PAE mode
176 178 *
177 179 * These are listed in reverse order, so that we can skip over unused
178 180 * ranges on machines with small memories.
179 181 *
180 182 * For now under the Hypervisor, we'll only ever have one memrange.
181 183 */
182 184 #define PFN_4GIG 0x100000
183 185 #define PFN_16MEG 0x1000
184 186 /* Indices into the memory range (arch_memranges) array. */
185 187 #define MRI_4G 0
186 188 #define MRI_2G 1
187 189 #define MRI_16M 2
188 190 #define MRI_0 3
189 191 static pfn_t arch_memranges[NUM_MEM_RANGES] = {
190 192 PFN_4GIG, /* pfn range for 4G and above */
191 193 0x80000, /* pfn range for 2G-4G */
192 194 PFN_16MEG, /* pfn range for 16M-2G */
193 195 0x00000, /* pfn range for 0-16M */
194 196 };
195 197 pfn_t *memranges = &arch_memranges[0];
196 198 int nranges = NUM_MEM_RANGES;
197 199
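
The reverse ordering of arch_memranges[] above means a lookup walks from the largest range down and stops at the first low bound the pfn clears. A minimal standalone sketch of that walk, mirroring memrange_num() further down in this file (the sample pfn is arbitrary):

    #include <stdio.h>

    #define PFN_4GIG        0x100000
    #define PFN_16MEG       0x1000
    #define NUM_MEM_RANGES  4

    static const unsigned long memranges[NUM_MEM_RANGES] = {
            PFN_4GIG,       /* 4G and above */
            0x80000,        /* 2G-4G */
            PFN_16MEG,      /* 16M-2G */
            0x00000,        /* 0-16M */
    };

    /* Same walk as memrange_num(): highest range first, break on a match. */
    static int
    memrange_num(unsigned long pfn)
    {
            int n;

            for (n = 0; n < NUM_MEM_RANGES - 1; ++n) {
                    if (pfn >= memranges[n])
                            break;
            }
            return (n);
    }

    int
    main(void)
    {
            /* pfn 0x2000 is a 32MB physical address: index 2, the 16M-2G range. */
            printf("%d\n", memrange_num(0x2000));
            return (0);
    }
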
198 200 /*
199 201 * This combines mem_node_config and memranges into one data
200 202 * structure to be used for page list management.
201 203 */
202 204 mnoderange_t *mnoderanges;
203 205 int mnoderangecnt;
204 206 int mtype4g;
205 207 int mtype16m;
206 208 int mtypetop; /* index of highest pfn'ed mnoderange */
207 209
208 210 /*
209 211 * 4g memory management variables for systems with more than 4g of memory:
210 212 *
211 213 * physical memory below 4g is required for 32bit dma devices and, currently,
212 214 * for kmem memory. On systems with more than 4g of memory, the pool of memory
213 215 * below 4g can be depleted without any paging activity given that there is
214 216 * likely to be sufficient memory above 4g.
215 217 *
216 218 * physmax4g is set true if the largest pfn is over 4g. The rest of the
217 219 * 4g memory management code is enabled only when physmax4g is true.
218 220 *
219 221 * maxmem4g is the count of the maximum number of pages on the page lists
220 222 * with physical addresses below 4g. It can be a lot less than 4g given that
221 223 * BIOS may reserve large chunks of space below 4g for hot plug pci devices,
222 224 * agp aperture etc.
223 225 *
224 226 * freemem4g maintains the count of the number of available pages on the
225 227 * page lists with physical addresses below 4g.
226 228 *
227 229 * DESFREE4G specifies the desired amount of below 4g memory. It defaults to
228 230 * 6% (desfree4gshift = 4) of maxmem4g.
229 231 *
230 232 * RESTRICT4G_ALLOC returns true if freemem4g falls below DESFREE4G
231 233 * and the amount of physical memory above 4g is greater than freemem4g.
232 234 * In this case, page_get_* routines will restrict below 4g allocations
233 235 * for requests that don't specifically require it.
234 236 */
235 237
236 238 #define DESFREE4G (maxmem4g >> desfree4gshift)
237 239
238 240 #define RESTRICT4G_ALLOC \
239 241 (physmax4g && (freemem4g < DESFREE4G) && ((freemem4g << 1) < freemem))
240 242
241 243 static pgcnt_t maxmem4g;
242 244 static pgcnt_t freemem4g;
243 245 static int physmax4g;
244 246 static int desfree4gshift = 4; /* maxmem4g shift to derive DESFREE4G */
245 247
246 248 /*
247 249 * 16m memory management:
248 250 *
249 251 * reserve some amount of physical memory below 16m for legacy devices.
250 252 *
251 253 * RESTRICT16M_ALLOC returns true if there are sufficient free pages above
252 254 * 16m or if the 16m pool drops below DESFREE16M.
253 255 *
254 256 * In this case, general page allocations via page_get_{free,cache}list
255 257 * routines will be restricted from allocating from the 16m pool. Allocations
256 258 * that require specific pfn ranges (page_get_anylist) and PG_PANIC allocations
257 259 * are not restricted.
258 260 */
259 261
260 262 #define FREEMEM16M MTYPE_FREEMEM(mtype16m)
261 263 #define DESFREE16M desfree16m
262 264 #define RESTRICT16M_ALLOC(freemem, pgcnt, flags) \
263 265 ((freemem != 0) && ((flags & PG_PANIC) == 0) && \
264 266 ((freemem >= (FREEMEM16M)) || \
265 267 (FREEMEM16M < (DESFREE16M + pgcnt))))
266 268
267 269 static pgcnt_t desfree16m = 0x380;
268 270
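
A worked reading of the RESTRICT16M_ALLOC predicate above, re-spelled as a standalone function; the PG_PANIC value and the page counts are made-up placeholders, only the shape of the test is taken from the macro:

    #include <assert.h>

    #define PG_PANIC        0x1     /* placeholder flag value */
    #define DESFREE16M      0x380   /* matches desfree16m above */

    /* Same predicate as RESTRICT16M_ALLOC, written out as a function. */
    static int
    restrict16m_alloc(long freemem, long freemem16m, long pgcnt, int flags)
    {
            return ((freemem != 0) && ((flags & PG_PANIC) == 0) &&
                ((freemem >= freemem16m) ||
                (freemem16m < (DESFREE16M + pgcnt))));
    }

    int
    main(void)
    {
            /*
             * With plenty of memory overall (100000 free pages) and 2000
             * pages free in the 16m pool, a normal 16-page request is
             * steered away from the pool, but a PG_PANIC request is not.
             */
            assert(restrict16m_alloc(100000, 2000, 16, 0) == 1);
            assert(restrict16m_alloc(100000, 2000, 16, PG_PANIC) == 0);
            return (0);
    }
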
269 271 /*
270 272 * This can be patched via /etc/system to allow old non-PAE aware device
271 273 * drivers to use kmem_alloc'd memory on 32 bit systems with > 4Gig RAM.
272 274 */
273 275 int restricted_kmemalloc = 0;
274 276
275 277 #ifdef VM_STATS
276 278 struct {
277 279 ulong_t pga_alloc;
278 280 ulong_t pga_notfullrange;
279 281 ulong_t pga_nulldmaattr;
280 282 ulong_t pga_allocok;
281 283 ulong_t pga_allocfailed;
282 284 ulong_t pgma_alloc;
283 285 ulong_t pgma_allocok;
284 286 ulong_t pgma_allocfailed;
285 287 ulong_t pgma_allocempty;
286 288 } pga_vmstats;
287 289 #endif
288 290
289 291 uint_t mmu_page_sizes;
290 292
291 293 /* How many page sizes the users can see */
292 294 uint_t mmu_exported_page_sizes;
293 295
294 296 /* page sizes that legacy applications can see */
295 297 uint_t mmu_legacy_page_sizes;
296 298
297 299 /*
298 300 * Number of pages in 1 GB. Don't enable automatic large pages if we have
299 301 * fewer than this many pages.
300 302 */
301 303 pgcnt_t shm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
302 304 pgcnt_t privm_lpg_min_physmem = 1 << (30 - MMU_PAGESHIFT);
303 305
304 306 /*
305 307 * Maximum and default segment size tunables for user private
306 308 * and shared anon memory, and user text and initialized data.
307 309 * These can be patched via /etc/system to allow large pages
308 310 * to be used for mapping application private and shared anon memory.
309 311 */
310 312 size_t mcntl0_lpsize = MMU_PAGESIZE;
311 313 size_t max_uheap_lpsize = MMU_PAGESIZE;
312 314 size_t default_uheap_lpsize = MMU_PAGESIZE;
313 315 size_t max_ustack_lpsize = MMU_PAGESIZE;
314 316 size_t default_ustack_lpsize = MMU_PAGESIZE;
315 317 size_t max_privmap_lpsize = MMU_PAGESIZE;
316 318 size_t max_uidata_lpsize = MMU_PAGESIZE;
317 319 size_t max_utext_lpsize = MMU_PAGESIZE;
318 320 size_t max_shm_lpsize = MMU_PAGESIZE;
319 321
320 322
321 323 /*
322 324 * initialized by page_coloring_init().
323 325 */
324 326 uint_t page_colors;
325 327 uint_t page_colors_mask;
326 328 uint_t page_coloring_shift;
327 329 int cpu_page_colors;
328 330 static uint_t l2_colors;
329 331
330 332 /*
331 333 * Page freelists and cachelists are dynamically allocated once mnoderangecnt
332 334 * and page_colors are calculated from the l2 cache n-way set size. Within a
333 335 * mnode range, the page freelist and cachelist are hashed into bins based on
334 336 * color. This makes it easier to search for a page within a specific memory
335 337 * range.
336 338 */
337 339 #define PAGE_COLORS_MIN 16
338 340
339 341 page_t ****page_freelists;
340 342 page_t ***page_cachelists;
341 343
342 344
343 345 /*
344 346 * Used by page layer to know about page sizes
345 347 */
346 348 hw_pagesize_t hw_page_array[MAX_NUM_LEVEL + 1];
347 349
348 350 kmutex_t *fpc_mutex[NPC_MUTEX];
349 351 kmutex_t *cpc_mutex[NPC_MUTEX];
350 352
351 353 /* Lock to protect mnoderanges array for memory DR operations. */
352 354 static kmutex_t mnoderange_lock;
353 355
354 356 /*
355 357 * Only let one thread at a time try to coalesce large pages, to
356 358 * prevent them from working against each other.
357 359 */
358 360 static kmutex_t contig_lock;
359 361 #define CONTIG_LOCK() mutex_enter(&contig_lock);
360 362 #define CONTIG_UNLOCK() mutex_exit(&contig_lock);
361 363
362 364 #define PFN_16M (mmu_btop((uint64_t)0x1000000))
363 365
364 366 /*
365 367 * Return the optimum page size for a given mapping
366 368 */
367 369 /*ARGSUSED*/
368 370 size_t
369 371 map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
370 372 {
371 373 level_t l = 0;
372 374 size_t pgsz = MMU_PAGESIZE;
373 375 size_t max_lpsize;
374 376 uint_t mszc;
375 377
376 378 ASSERT(maptype != MAPPGSZ_VA);
377 379
378 380 if (maptype != MAPPGSZ_ISM && physmem < privm_lpg_min_physmem) {
379 381 return (MMU_PAGESIZE);
380 382 }
381 383
382 384 switch (maptype) {
383 385 case MAPPGSZ_HEAP:
384 386 case MAPPGSZ_STK:
385 387 max_lpsize = memcntl ? mcntl0_lpsize : (maptype ==
386 388 MAPPGSZ_HEAP ? max_uheap_lpsize : max_ustack_lpsize);
387 389 if (max_lpsize == MMU_PAGESIZE) {
388 390 return (MMU_PAGESIZE);
389 391 }
390 392 if (len == 0) {
391 393 len = (maptype == MAPPGSZ_HEAP) ? p->p_brkbase +
392 394 p->p_brksize - p->p_bssbase : p->p_stksize;
393 395 }
394 396 len = (maptype == MAPPGSZ_HEAP) ? MAX(len,
395 397 default_uheap_lpsize) : MAX(len, default_ustack_lpsize);
396 398
397 399 /*
398 400 * use the page size that best fits len
399 401 */
400 402 for (l = mmu.umax_page_level; l > 0; --l) {
401 403 if (LEVEL_SIZE(l) > max_lpsize || len < LEVEL_SIZE(l)) {
402 404 continue;
403 405 } else {
404 406 pgsz = LEVEL_SIZE(l);
405 407 }
406 408 break;
407 409 }
408 410
409 411 mszc = (maptype == MAPPGSZ_HEAP ? p->p_brkpageszc :
410 412 p->p_stkpageszc);
411 413 if (addr == 0 && (pgsz < hw_page_array[mszc].hp_size)) {
412 414 pgsz = hw_page_array[mszc].hp_size;
413 415 }
414 416 return (pgsz);
415 417
416 418 case MAPPGSZ_ISM:
417 419 for (l = mmu.umax_page_level; l > 0; --l) {
418 420 if (len >= LEVEL_SIZE(l))
419 421 return (LEVEL_SIZE(l));
420 422 }
421 423 return (LEVEL_SIZE(0));
422 424 }
423 425 return (pgsz);
424 426 }
425 427
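A standalone sketch of the heap/stack level walk in map_pgsz() above, assuming the usual x86 level sizes of 4K/2M/1G; max_lpsize and len are hypothetical values, not taken from this file:

    #include <assert.h>
    #include <stddef.h>

    int
    main(void)
    {
            /* Assumed x86 level sizes: 4K, 2M, 1G. */
            size_t level_size[] = { 0x1000, 0x200000, 0x40000000 };
            size_t max_lpsize = 0x200000;           /* hypothetical tunable */
            size_t len = 3 * 1024 * 1024;           /* 3MB mapping */
            size_t pgsz = level_size[0];
            int l;

            /* Walk levels downward; keep the largest size that fits both limits. */
            for (l = 2; l > 0; --l) {
                    if (level_size[l] > max_lpsize || len < level_size[l])
                            continue;
                    pgsz = level_size[l];
                    break;
            }
            assert(pgsz == 0x200000);       /* 1G exceeds max_lpsize; 2M fits in 3MB */
            return (0);
    }
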
426 428 static uint_t
427 429 map_szcvec(caddr_t addr, size_t size, uintptr_t off, size_t max_lpsize,
428 430 size_t min_physmem)
429 431 {
430 432 caddr_t eaddr = addr + size;
431 433 uint_t szcvec = 0;
432 434 caddr_t raddr;
433 435 caddr_t readdr;
434 436 size_t pgsz;
435 437 int i;
436 438
437 439 if (physmem < min_physmem || max_lpsize <= MMU_PAGESIZE) {
438 440 return (0);
439 441 }
440 442
441 443 for (i = mmu_exported_page_sizes - 1; i > 0; i--) {
442 444 pgsz = page_get_pagesize(i);
443 445 if (pgsz > max_lpsize) {
444 446 continue;
445 447 }
446 448 raddr = (caddr_t)P2ROUNDUP((uintptr_t)addr, pgsz);
447 449 readdr = (caddr_t)P2ALIGN((uintptr_t)eaddr, pgsz);
448 450 if (raddr < addr || raddr >= readdr) {
449 451 continue;
450 452 }
451 453 if (P2PHASE((uintptr_t)addr ^ off, pgsz)) {
452 454 continue;
453 455 }
454 456 /*
455 457 * Set szcvec to the remaining page sizes.
456 458 */
457 459 szcvec = ((1 << (i + 1)) - 1) & ~1;
458 460 break;
459 461 }
460 462 return (szcvec);
461 463 }
462 464
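A tiny worked value for the szcvec construction above; the chosen index is hypothetical:

    #include <assert.h>

    int
    main(void)
    {
            int i = 2;      /* hypothetical largest size code that fit */
            unsigned int szcvec = ((1U << (i + 1)) - 1) & ~1U;

            /* Size codes 1 and 2 stay eligible; the base page size bit is cleared. */
            assert(szcvec == 0x6);
            return (0);
    }
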
463 465 /*
464 466 * Return a bit vector of large page size codes that
465 467 * can be used to map [addr, addr + len) region.
466 468 */
467 469 /*ARGSUSED*/
468 470 uint_t
469 471 map_pgszcvec(caddr_t addr, size_t size, uintptr_t off, int flags, int type,
470 472 int memcntl)
471 473 {
472 474 size_t max_lpsize = mcntl0_lpsize;
473 475
474 476 if (mmu.max_page_level == 0)
475 477 return (0);
476 478
477 479 if (flags & MAP_TEXT) {
478 480 if (!memcntl)
479 481 max_lpsize = max_utext_lpsize;
480 482 return (map_szcvec(addr, size, off, max_lpsize,
481 483 shm_lpg_min_physmem));
482 484
483 485 } else if (flags & MAP_INITDATA) {
484 486 if (!memcntl)
485 487 max_lpsize = max_uidata_lpsize;
486 488 return (map_szcvec(addr, size, off, max_lpsize,
487 489 privm_lpg_min_physmem));
488 490
489 491 } else if (type == MAPPGSZC_SHM) {
490 492 if (!memcntl)
491 493 max_lpsize = max_shm_lpsize;
492 494 return (map_szcvec(addr, size, off, max_lpsize,
493 495 shm_lpg_min_physmem));
494 496
495 497 } else if (type == MAPPGSZC_HEAP) {
496 498 if (!memcntl)
497 499 max_lpsize = max_uheap_lpsize;
498 500 return (map_szcvec(addr, size, off, max_lpsize,
499 501 privm_lpg_min_physmem));
500 502
501 503 } else if (type == MAPPGSZC_STACK) {
502 504 if (!memcntl)
503 505 max_lpsize = max_ustack_lpsize;
504 506 return (map_szcvec(addr, size, off, max_lpsize,
505 507 privm_lpg_min_physmem));
506 508
507 509 } else {
508 510 if (!memcntl)
509 511 max_lpsize = max_privmap_lpsize;
510 512 return (map_szcvec(addr, size, off, max_lpsize,
511 513 privm_lpg_min_physmem));
512 514 }
513 515 }
514 516
515 517 /*
516 518 * Handle a pagefault.
517 519 */
518 520 faultcode_t
519 521 pagefault(
520 522 caddr_t addr,
521 523 enum fault_type type,
522 524 enum seg_rw rw,
523 525 int iskernel)
524 526 {
525 527 struct as *as;
526 528 struct hat *hat;
527 529 struct proc *p;
528 530 kthread_t *t;
529 531 faultcode_t res;
530 532 caddr_t base;
531 533 size_t len;
532 534 int err;
533 535 int mapped_red;
534 536 uintptr_t ea;
535 537
536 538 ASSERT_STACK_ALIGNED();
537 539
538 540 if (INVALID_VADDR(addr))
539 541 return (FC_NOMAP);
540 542
541 543 mapped_red = segkp_map_red();
542 544
543 545 if (iskernel) {
544 546 as = &kas;
545 547 hat = as->a_hat;
546 548 } else {
547 549 t = curthread;
548 550 p = ttoproc(t);
549 551 as = p->p_as;
550 552 hat = as->a_hat;
551 553 }
552 554
553 555 /*
554 556 * Dispatch pagefault.
555 557 */
556 558 res = as_fault(hat, as, addr, 1, type, rw);
557 559
558 560 /*
559 561 * If this isn't a potential unmapped hole in the user's
560 562 * UNIX data or stack segments, just return status info.
561 563 */
562 564 if (res != FC_NOMAP || iskernel)
563 565 goto out;
564 566
565 567 /*
566 568 * Check to see if we happened to fault on a currently unmapped
567 569 * part of the UNIX data or stack segments. If so, create a zfod
568 570 * mapping there and then try calling the fault routine again.
569 571 */
570 572 base = p->p_brkbase;
571 573 len = p->p_brksize;
572 574
573 575 if (addr < base || addr >= base + len) { /* data seg? */
574 576 base = (caddr_t)p->p_usrstack - p->p_stksize;
575 577 len = p->p_stksize;
576 578 if (addr < base || addr >= p->p_usrstack) { /* stack seg? */
577 579 /* not in either UNIX data or stack segments */
578 580 res = FC_NOMAP;
579 581 goto out;
580 582 }
581 583 }
582 584
583 585 /*
584 586 * the rest of this function implements 3.X/4.X/5.X compatibility.
585 587 * This code is probably not needed anymore
586 588 */
587 589 if (p->p_model == DATAMODEL_ILP32) {
588 590
589 591 /* expand the gap to the page boundaries on each side */
590 592 ea = P2ROUNDUP((uintptr_t)base + len, MMU_PAGESIZE);
591 593 base = (caddr_t)P2ALIGN((uintptr_t)base, MMU_PAGESIZE);
592 594 len = ea - (uintptr_t)base;
593 595
594 596 as_rangelock(as);
595 597 if (as_gap(as, MMU_PAGESIZE, &base, &len, AH_CONTAIN, addr) ==
596 598 0) {
597 599 err = as_map(as, base, len, segvn_create, zfod_argsp);
598 600 as_rangeunlock(as);
599 601 if (err) {
600 602 res = FC_MAKE_ERR(err);
601 603 goto out;
602 604 }
603 605 } else {
604 606 /*
605 607 * This page is already mapped by another thread after
606 608 * we returned from as_fault() above. We just fall
607 609 * through to as_fault() below.
608 610 */
609 611 as_rangeunlock(as);
610 612 }
611 613
612 614 res = as_fault(hat, as, addr, 1, F_INVAL, rw);
613 615 }
614 616
615 617 out:
616 618 if (mapped_red)
617 619 segkp_unmap_red();
618 620
619 621 return (res);
620 622 }
621 623
622 624 void
623 625 map_addr(caddr_t *addrp, size_t len, offset_t off, int vacalign, uint_t flags)
624 626 {
625 627 struct proc *p = curproc;
626 628 caddr_t userlimit = (flags & _MAP_LOW32) ?
627 629 (caddr_t)_userlimit32 : p->p_as->a_userlimit;
628 630
629 631 map_addr_proc(addrp, len, off, vacalign, userlimit, curproc, flags);
630 632 }
631 633
632 634 /*ARGSUSED*/
633 635 int
634 636 map_addr_vacalign_check(caddr_t addr, u_offset_t off)
635 637 {
636 638 return (0);
637 639 }
638 640
639 641 /*
642 + * The maximum amount a randomized mapping will be slewed. We should perhaps
643 + * arrange things so these tunables can be separate for mmap, mmapobj, and
644 + * ld.so
645 + */
646 +size_t aslr_max_map_skew = 256 * 1024 * 1024; /* 256MB */
647 +
648 +/*
640 649 * map_addr_proc() is the routine called when the system is to
641 650 * choose an address for the user. We will pick an address
642 651 * range which is the highest available below userlimit.
643 652 *
644 653 * Every mapping will have a redzone of a single page on either side of
645 654 * the request. This is done to leave one page unmapped between segments.
646 655 * This is not required, but it's useful for the user because if their
647 656 * program strays across a segment boundary, it will catch a fault
648 657 * immediately making debugging a little easier. Currently the redzone
649 658 * is mandatory.
650 659 *
651 660 * addrp is a value/result parameter.
652 661 * On input it is a hint from the user to be used in a completely
653 662 * machine dependent fashion. We decide to completely ignore this hint.
654 663 * If MAP_ALIGN was specified, addrp contains the minimal alignment, which
655 664 * must be some "power of two" multiple of pagesize.
656 665 *
657 666 * On output it is NULL if no address can be found in the current
658 667 * process's address space or else an address that is currently
659 668 * not mapped for len bytes with a page of red zone on either side.
660 669 *
661 670 * vacalign is not needed on x86 (it's for virtually addressed caches)
662 671 */
663 672 /*ARGSUSED*/
664 673 void
665 674 map_addr_proc(
666 675 caddr_t *addrp,
667 676 size_t len,
668 677 offset_t off,
669 678 int vacalign,
670 679 caddr_t userlimit,
671 680 struct proc *p,
672 681 uint_t flags)
673 682 {
674 683 struct as *as = p->p_as;
675 684 caddr_t addr;
676 685 caddr_t base;
677 686 size_t slen;
678 687 size_t align_amount;
679 688
680 689 ASSERT32(userlimit == as->a_userlimit);
681 690
682 691 base = p->p_brkbase;
683 692 #if defined(__amd64)
684 693 /*
685 694 * XX64 Yes, this needs more work.
686 695 */
687 696 if (p->p_model == DATAMODEL_NATIVE) {
688 697 if (userlimit < as->a_userlimit) {
689 698 /*
690 699 * This happens when a program wants to map
691 700 * something in a range that's accessible to a
692 701 * program in a smaller address space. For example,
693 702 * a 64-bit program calling mmap32(2) to guarantee
694 703 * that the returned address is below 4Gbytes.
695 704 */
696 705 ASSERT((uintptr_t)userlimit < ADDRESS_C(0xffffffff));
697 706
698 707 if (userlimit > base)
699 708 slen = userlimit - base;
700 709 else {
701 710 *addrp = NULL;
702 711 return;
703 712 }
704 713 } else {
705 714 /*
706 715 * XX64 This layout is probably wrong .. but in
707 716 * the event we make the amd64 address space look
708 717 * like sparcv9 i.e. with the stack -above- the
709 718 * heap, this bit of code might even be correct.
710 719 */
711 720 slen = p->p_usrstack - base -
712 721 ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK);
713 722 }
714 723 } else
715 724 #endif
716 725 slen = userlimit - base;
717 726
718 727 /* Make len be a multiple of PAGESIZE */
719 728 len = (len + PAGEOFFSET) & PAGEMASK;
720 729
721 730 /*
722 731 * figure out what the alignment should be
723 732 *
724 733 * XX64 -- is there an ELF_AMD64_MAXPGSZ or is it the same????
725 734 */
726 735 if (len <= ELF_386_MAXPGSZ) {
727 736 /*
728 737 * Align virtual addresses to ensure that ELF shared libraries
729 738 * are mapped with the appropriate alignment constraints by
730 739 * the run-time linker.
731 740 */
732 741 align_amount = ELF_386_MAXPGSZ;
733 742 } else {
734 743 /*
735 744 * For 32-bit processes, only those which have specified
736 745 * MAP_ALIGN and an addr will be aligned on a larger page size.
737 746 * Not doing so can potentially waste up to 1G of process
738 747 * address space.
739 748 */
740 749 int lvl = (p->p_model == DATAMODEL_ILP32) ? 1 :
741 750 mmu.umax_page_level;
742 751
743 752 while (lvl && len < LEVEL_SIZE(lvl))
744 753 --lvl;
745 754
746 755 align_amount = LEVEL_SIZE(lvl);
747 756 }
748 757 if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount))
749 758 align_amount = (uintptr_t)*addrp;
750 759
751 760 ASSERT(ISP2(align_amount));
752 761 ASSERT(align_amount == 0 || align_amount >= PAGESIZE);
753 762
754 763 off = off & (align_amount - 1);
764 +
755 765 /*
756 766 * Look for a large enough hole starting below userlimit.
757 767 * After finding it, use the upper part.
758 768 */
759 769 if (as_gap_aligned(as, len, &base, &slen, AH_HI, NULL, align_amount,
760 770 PAGESIZE, off) == 0) {
761 771 caddr_t as_addr;
762 772
763 773 /*
764 774 * addr is the highest possible address to use since we have
765 775 * a PAGESIZE redzone at the beginning and end.
766 776 */
767 777 addr = base + slen - (PAGESIZE + len);
768 778 as_addr = addr;
769 779 /*
770 780 * Round address DOWN to the alignment amount and
771 781 * add the offset in.
772 782 * If addr is greater than as_addr, len would not be large
773 783 * enough to include the redzone, so we must adjust down
774 784 * by the alignment amount.
775 785 */
776 786 addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1)));
777 787 addr += (uintptr_t)off;
778 788 if (addr > as_addr) {
779 789 addr -= align_amount;
780 790 }
781 791
792 + /*
793 + * If randomization is requested, slew the allocation
794 + * backwards, within the same gap, by a random amount.
795 + */
796 + if (flags & _MAP_RANDOMIZE) {
797 + uint32_t slew;
798 +
799 + (void) random_get_pseudo_bytes((uint8_t *)&slew,
800 + sizeof (slew));
801 +
802 + slew = slew % MIN(aslr_max_map_skew, (addr - base));
803 + addr -= P2ALIGN(slew, align_amount);
804 + }
805 +
782 806 ASSERT(addr > base);
783 807 ASSERT(addr + len < base + slen);
784 808 ASSERT(((uintptr_t)addr & (align_amount - 1)) ==
785 809 ((uintptr_t)(off)));
786 810 *addrp = addr;
787 811 } else {
788 812 *addrp = NULL; /* no more virtual space */
789 813 }
790 814 }
791 815
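The new _MAP_RANDOMIZE block above slews the top-of-gap choice downward by a bounded, alignment-preserving amount. A minimal user-space sketch of the same arithmetic; the gap bounds, the 4MB alignment, and the stand-in PRNG value are all made up, and P2ALIGN is re-spelled locally:

    #include <assert.h>
    #include <stdint.h>

    #define P2ALIGN(x, a)   ((x) & -(a))    /* round down to a power-of-two multiple */

    int
    main(void)
    {
            uintptr_t base = 0x10000000;            /* bottom of the found gap */
            uintptr_t addr = 0x7f000000;            /* top-of-gap pick before the slew */
            uintptr_t align_amount = 0x400000;      /* hypothetical 4MB alignment */
            uintptr_t aslr_max_map_skew = 256UL * 1024 * 1024;
            uint32_t slew = 0x12345678;             /* stands in for random_get_pseudo_bytes() */
            uintptr_t room = addr - base;

            /* Bound the slew by both the tunable and the room left in the gap. */
            slew = slew % (aslr_max_map_skew < room ? aslr_max_map_skew : room);
            addr -= P2ALIGN((uintptr_t)slew, align_amount);

            /* The slewed address keeps its alignment and stays inside the gap. */
            assert((addr & (align_amount - 1)) == 0);
            assert(addr > base);
            return (0);
    }
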
792 816 int valid_va_range_aligned_wraparound;
793 817
794 818 /*
795 819 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
796 820 * addresses at least "minlen" long, where the base of the range is at "off"
797 821 * phase from an "align" boundary and there is space for a "redzone"-sized
798 822 * redzone on either side of the range. On success, 1 is returned and *basep
799 823 * and *lenp are adjusted to describe the acceptable range (including
800 824 * the redzone). On failure, 0 is returned.
801 825 */
802 826 /*ARGSUSED3*/
803 827 int
804 828 valid_va_range_aligned(caddr_t *basep, size_t *lenp, size_t minlen, int dir,
805 829 size_t align, size_t redzone, size_t off)
806 830 {
807 831 uintptr_t hi, lo;
808 832 size_t tot_len;
809 833
810 834 ASSERT(align == 0 ? off == 0 : off < align);
811 835 ASSERT(ISP2(align));
812 836 ASSERT(align == 0 || align >= PAGESIZE);
813 837
814 838 lo = (uintptr_t)*basep;
815 839 hi = lo + *lenp;
816 840 tot_len = minlen + 2 * redzone; /* need at least this much space */
817 841
818 842 /*
819 843 * If hi rolled over the top, try cutting back.
820 844 */
821 845 if (hi < lo) {
822 846 *lenp = 0UL - lo - 1UL;
823 847 /* See if this really happens. If so, then we figure out why */
824 848 valid_va_range_aligned_wraparound++;
825 849 hi = lo + *lenp;
826 850 }
827 851 if (*lenp < tot_len) {
828 852 return (0);
829 853 }
830 854
831 855 #if defined(__amd64)
832 856 /*
833 857 * Deal with a possible hole in the address range between
834 858 * hole_start and hole_end that should never be mapped.
835 859 */
836 860 if (lo < hole_start) {
837 861 if (hi > hole_start) {
838 862 if (hi < hole_end) {
839 863 hi = hole_start;
840 864 } else {
841 865 /* lo < hole_start && hi >= hole_end */
842 866 if (dir == AH_LO) {
843 867 /*
844 868 * prefer lowest range
845 869 */
846 870 if (hole_start - lo >= tot_len)
847 871 hi = hole_start;
848 872 else if (hi - hole_end >= tot_len)
849 873 lo = hole_end;
850 874 else
851 875 return (0);
852 876 } else {
853 877 /*
854 878 * prefer highest range
855 879 */
856 880 if (hi - hole_end >= tot_len)
857 881 lo = hole_end;
858 882 else if (hole_start - lo >= tot_len)
859 883 hi = hole_start;
860 884 else
861 885 return (0);
862 886 }
863 887 }
864 888 }
865 889 } else {
866 890 /* lo >= hole_start */
867 891 if (hi < hole_end)
868 892 return (0);
869 893 if (lo < hole_end)
870 894 lo = hole_end;
871 895 }
872 896 #endif
873 897
874 898 if (hi - lo < tot_len)
875 899 return (0);
876 900
877 901 if (align > 1) {
878 902 uintptr_t tlo = lo + redzone;
879 903 uintptr_t thi = hi - redzone;
880 904 tlo = (uintptr_t)P2PHASEUP(tlo, align, off);
881 905 if (tlo < lo + redzone) {
882 906 return (0);
883 907 }
884 908 if (thi < tlo || thi - tlo < minlen) {
885 909 return (0);
886 910 }
887 911 }
888 912
889 913 *basep = (caddr_t)lo;
890 914 *lenp = hi - lo;
891 915 return (1);
892 916 }
893 917
894 918 /*
895 919 * Determine whether [*basep, *basep + *lenp) contains a mappable range of
896 920 * addresses at least "minlen" long. On success, 1 is returned and *basep
897 921 * and *lenp are adjusted to describe the acceptable range. On failure, 0
898 922 * is returned.
899 923 */
900 924 int
901 925 valid_va_range(caddr_t *basep, size_t *lenp, size_t minlen, int dir)
902 926 {
903 927 return (valid_va_range_aligned(basep, lenp, minlen, dir, 0, 0, 0));
904 928 }
905 929
906 930 /*
931 + * Default to forbidding the first 64k of address space. This protects most
932 + * reasonably sized structures from dereferences through NULL:
933 + * ((foo_t *)0)->bar
934 + */
935 +uintptr_t forbidden_null_mapping_sz = 0x10000;
936 +
937 +/*
907 938 * Determine whether [addr, addr+len] are valid user addresses.
908 939 */
909 940 /*ARGSUSED*/
910 941 int
911 942 valid_usr_range(caddr_t addr, size_t len, uint_t prot, struct as *as,
912 943 caddr_t userlimit)
913 944 {
914 945 caddr_t eaddr = addr + len;
915 946
916 947 if (eaddr <= addr || addr >= userlimit || eaddr > userlimit)
917 948 return (RANGE_BADADDR);
918 949
950 + if ((addr <= (caddr_t)forbidden_null_mapping_sz) &&
951 + secflag_enabled(as->a_proc, PROC_SEC_FORBIDNULLMAP))
952 + return (RANGE_BADADDR);
953 +
919 954 #if defined(__amd64)
920 955 /*
921 956 * Check for the VA hole
922 957 */
923 958 if (eaddr > (caddr_t)hole_start && addr < (caddr_t)hole_end)
924 959 return (RANGE_BADADDR);
925 960 #endif
926 961
927 962 return (RANGE_OKAY);
928 963 }
929 964
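The new check above rejects user mappings that start at or below forbidden_null_mapping_sz when the process runs with PROC_SEC_FORBIDNULLMAP set. A small sketch of why the 64K default covers the ((foo_t *)0)->bar case for reasonably sized structures; the structure here is hypothetical:

    #include <assert.h>
    #include <stddef.h>

    #define FORBIDDEN_NULL_MAPPING_SZ       0x10000 /* the default above */

    /* A hypothetical structure; any member within 64K of its base qualifies. */
    typedef struct foo {
            char    pad[0x8000];
            int     bar;
    } foo_t;

    int
    main(void)
    {
            /*
             * Dereferencing bar through a NULL foo_t pointer touches the
             * address offsetof(foo_t, bar).  With the default in place, no
             * mapping can be created at or below 64K, so that access faults
             * rather than reading attacker-controlled memory.
             */
            assert(offsetof(foo_t, bar) < FORBIDDEN_NULL_MAPPING_SZ);
            return (0);
    }
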
930 965 /*
931 966 * Return 1 if the page frame is onboard memory, else 0.
932 967 */
933 968 int
934 969 pf_is_memory(pfn_t pf)
935 970 {
936 971 if (pfn_is_foreign(pf))
937 972 return (0);
938 973 return (address_in_memlist(phys_install, pfn_to_pa(pf), 1));
939 974 }
940 975
941 976 /*
942 977 * return the memrange containing pfn
943 978 */
944 979 int
945 980 memrange_num(pfn_t pfn)
946 981 {
947 982 int n;
948 983
949 984 for (n = 0; n < nranges - 1; ++n) {
950 985 if (pfn >= memranges[n])
951 986 break;
952 987 }
953 988 return (n);
954 989 }
955 990
956 991 /*
957 992 * return the mnoderange containing pfn
958 993 */
959 994 /*ARGSUSED*/
960 995 int
961 996 pfn_2_mtype(pfn_t pfn)
962 997 {
963 998 #if defined(__xpv)
964 999 return (0);
965 1000 #else
966 1001 int n;
967 1002
968 1003 /* Always start from highest pfn and work our way down */
969 1004 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
970 1005 if (pfn >= mnoderanges[n].mnr_pfnlo) {
971 1006 break;
972 1007 }
973 1008 }
974 1009 return (n);
975 1010 #endif
976 1011 }
977 1012
978 1013 #if !defined(__xpv)
979 1014 /*
980 1015 * is_contigpage_free:
981 1016 * returns a page list of contiguous pages. It minimally has to return
982 1017 * minctg pages. Caller determines minctg based on the scatter-gather
983 1018 * list length.
984 1019 *
985 1020 * pfnp is set to the next page frame to search on return.
986 1021 */
987 1022 static page_t *
988 1023 is_contigpage_free(
989 1024 pfn_t *pfnp,
990 1025 pgcnt_t *pgcnt,
991 1026 pgcnt_t minctg,
992 1027 uint64_t pfnseg,
993 1028 int iolock)
994 1029 {
995 1030 int i = 0;
996 1031 pfn_t pfn = *pfnp;
997 1032 page_t *pp;
998 1033 page_t *plist = NULL;
999 1034
1000 1035 /*
1001 1036 * fail if pfn + minctg crosses a segment boundary.
1002 1037 * Adjust for next starting pfn to begin at segment boundary.
1003 1038 */
1004 1039
1005 1040 if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg)) {
1006 1041 *pfnp = roundup(*pfnp, pfnseg + 1);
1007 1042 return (NULL);
1008 1043 }
1009 1044
1010 1045 do {
1011 1046 retry:
1012 1047 pp = page_numtopp_nolock(pfn + i);
1013 1048 if ((pp == NULL) || IS_DUMP_PAGE(pp) ||
1014 1049 (page_trylock(pp, SE_EXCL) == 0)) {
1015 1050 (*pfnp)++;
1016 1051 break;
1017 1052 }
1018 1053 if (page_pptonum(pp) != pfn + i) {
1019 1054 page_unlock(pp);
1020 1055 goto retry;
1021 1056 }
1022 1057
1023 1058 if (!(PP_ISFREE(pp))) {
1024 1059 page_unlock(pp);
1025 1060 (*pfnp)++;
1026 1061 break;
1027 1062 }
1028 1063
1029 1064 if (!PP_ISAGED(pp)) {
1030 1065 page_list_sub(pp, PG_CACHE_LIST);
1031 1066 page_hashout(pp, (kmutex_t *)NULL);
1032 1067 } else {
1033 1068 page_list_sub(pp, PG_FREE_LIST);
1034 1069 }
1035 1070
1036 1071 if (iolock)
1037 1072 page_io_lock(pp);
1038 1073 page_list_concat(&plist, &pp);
1039 1074
1040 1075 /*
1041 1076 * exit loop when pgcnt satisfied or segment boundary reached.
1042 1077 */
1043 1078
1044 1079 } while ((++i < *pgcnt) && ((pfn + i) & pfnseg));
1045 1080
1046 1081 *pfnp += i; /* set to next pfn to search */
1047 1082
1048 1083 if (i >= minctg) {
1049 1084 *pgcnt -= i;
1050 1085 return (plist);
1051 1086 }
1052 1087
1053 1088 /*
1054 1089 * failure: minctg not satisfied.
1055 1090 *
1056 1091 * if next request crosses segment boundary, set next pfn
1057 1092 * to search from the segment boundary.
1058 1093 */
1059 1094 if (((*pfnp + minctg - 1) & pfnseg) < (*pfnp & pfnseg))
1060 1095 *pfnp = roundup(*pfnp, pfnseg + 1);
1061 1096
1062 1097 /* clean up any pages already allocated */
1063 1098
1064 1099 while (plist) {
1065 1100 pp = plist;
1066 1101 page_sub(&plist, pp);
1067 1102 page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
1068 1103 if (iolock)
1069 1104 page_io_unlock(pp);
1070 1105 page_unlock(pp);
1071 1106 }
1072 1107
1073 1108 return (NULL);
1074 1109 }
1075 1110 #endif /* !__xpv */
1076 1111
1077 1112 /*
1078 1113 * verify that pages being returned from allocator have correct DMA attribute
1079 1114 */
1080 1115 #ifndef DEBUG
1081 1116 #define check_dma(a, b, c) (void)(0)
1082 1117 #else
1083 1118 static void
1084 1119 check_dma(ddi_dma_attr_t *dma_attr, page_t *pp, int cnt)
1085 1120 {
1086 1121 if (dma_attr == NULL)
1087 1122 return;
1088 1123
1089 1124 while (cnt-- > 0) {
1090 1125 if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) <
1091 1126 dma_attr->dma_attr_addr_lo)
1092 1127 panic("PFN (pp=%p) below dma_attr_addr_lo", (void *)pp);
1093 1128 if (pa_to_ma(pfn_to_pa(pp->p_pagenum)) >=
1094 1129 dma_attr->dma_attr_addr_hi)
1095 1130 panic("PFN (pp=%p) above dma_attr_addr_hi", (void *)pp);
1096 1131 pp = pp->p_next;
1097 1132 }
1098 1133 }
1099 1134 #endif
1100 1135
1101 1136 #if !defined(__xpv)
1102 1137 static page_t *
1103 1138 page_get_contigpage(pgcnt_t *pgcnt, ddi_dma_attr_t *mattr, int iolock)
1104 1139 {
1105 1140 pfn_t pfn;
1106 1141 int sgllen;
1107 1142 uint64_t pfnseg;
1108 1143 pgcnt_t minctg;
1109 1144 page_t *pplist = NULL, *plist;
1110 1145 uint64_t lo, hi;
1111 1146 pgcnt_t pfnalign = 0;
1112 1147 static pfn_t startpfn;
1113 1148 static pgcnt_t lastctgcnt;
1114 1149 uintptr_t align;
1115 1150
1116 1151 CONTIG_LOCK();
1117 1152
1118 1153 if (mattr) {
1119 1154 lo = mmu_btop((mattr->dma_attr_addr_lo + MMU_PAGEOFFSET));
1120 1155 hi = mmu_btop(mattr->dma_attr_addr_hi);
1121 1156 if (hi >= physmax)
1122 1157 hi = physmax - 1;
1123 1158 sgllen = mattr->dma_attr_sgllen;
1124 1159 pfnseg = mmu_btop(mattr->dma_attr_seg);
1125 1160
1126 1161 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
1127 1162 if (align > MMU_PAGESIZE)
1128 1163 pfnalign = mmu_btop(align);
1129 1164
1130 1165 /*
1131 1166 * in order to satisfy the request, must minimally
1132 1167 * acquire minctg contiguous pages
1133 1168 */
1134 1169 minctg = howmany(*pgcnt, sgllen);
1135 1170
1136 1171 ASSERT(hi >= lo);
1137 1172
1138 1173 /*
1139 1174 * start from where last searched if the minctg >= lastctgcnt
1140 1175 */
1141 1176 if (minctg < lastctgcnt || startpfn < lo || startpfn > hi)
1142 1177 startpfn = lo;
1143 1178 } else {
1144 1179 hi = physmax - 1;
1145 1180 lo = 0;
1146 1181 sgllen = 1;
1147 1182 pfnseg = mmu.highest_pfn;
1148 1183 minctg = *pgcnt;
1149 1184
1150 1185 if (minctg < lastctgcnt)
1151 1186 startpfn = lo;
1152 1187 }
1153 1188 lastctgcnt = minctg;
1154 1189
1155 1190 ASSERT(pfnseg + 1 >= (uint64_t)minctg);
1156 1191
1157 1192 /* conserve 16m memory - start search above 16m when possible */
1158 1193 if (hi > PFN_16M && startpfn < PFN_16M)
1159 1194 startpfn = PFN_16M;
1160 1195
1161 1196 pfn = startpfn;
1162 1197 if (pfnalign)
1163 1198 pfn = P2ROUNDUP(pfn, pfnalign);
1164 1199
1165 1200 while (pfn + minctg - 1 <= hi) {
1166 1201
1167 1202 plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1168 1203 if (plist) {
1169 1204 page_list_concat(&pplist, &plist);
1170 1205 sgllen--;
1171 1206 /*
1172 1207 * return when contig pages no longer needed
1173 1208 */
1174 1209 if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1175 1210 startpfn = pfn;
1176 1211 CONTIG_UNLOCK();
1177 1212 check_dma(mattr, pplist, *pgcnt);
1178 1213 return (pplist);
1179 1214 }
1180 1215 minctg = howmany(*pgcnt, sgllen);
1181 1216 }
1182 1217 if (pfnalign)
1183 1218 pfn = P2ROUNDUP(pfn, pfnalign);
1184 1219 }
1185 1220
1186 1221 /* cannot find contig pages in specified range */
1187 1222 if (startpfn == lo) {
1188 1223 CONTIG_UNLOCK();
1189 1224 return (NULL);
1190 1225 }
1191 1226
1192 1227 /* did not start with lo previously */
1193 1228 pfn = lo;
1194 1229 if (pfnalign)
1195 1230 pfn = P2ROUNDUP(pfn, pfnalign);
1196 1231
1197 1232 /* allow search to go above startpfn */
1198 1233 while (pfn < startpfn) {
1199 1234
1200 1235 plist = is_contigpage_free(&pfn, pgcnt, minctg, pfnseg, iolock);
1201 1236 if (plist != NULL) {
1202 1237
1203 1238 page_list_concat(&pplist, &plist);
1204 1239 sgllen--;
1205 1240
1206 1241 /*
1207 1242 * return when contig pages no longer needed
1208 1243 */
1209 1244 if (!*pgcnt || ((*pgcnt <= sgllen) && !pfnalign)) {
1210 1245 startpfn = pfn;
1211 1246 CONTIG_UNLOCK();
1212 1247 check_dma(mattr, pplist, *pgcnt);
1213 1248 return (pplist);
1214 1249 }
1215 1250 minctg = howmany(*pgcnt, sgllen);
1216 1251 }
1217 1252 if (pfnalign)
1218 1253 pfn = P2ROUNDUP(pfn, pfnalign);
1219 1254 }
1220 1255 CONTIG_UNLOCK();
1221 1256 return (NULL);
1222 1257 }
1223 1258 #endif /* !__xpv */
1224 1259
1225 1260 /*
1226 1261 * mnode_range_cnt() calculates the number of memory ranges for mnode and
1227 1262 * memranges[]. Used to determine the size of page lists and mnoderanges.
1228 1263 */
1229 1264 int
1230 1265 mnode_range_cnt(int mnode)
1231 1266 {
1232 1267 #if defined(__xpv)
1233 1268 ASSERT(mnode == 0);
1234 1269 return (1);
1235 1270 #else /* __xpv */
1236 1271 int mri;
1237 1272 int mnrcnt = 0;
1238 1273
1239 1274 if (mem_node_config[mnode].exists != 0) {
1240 1275 mri = nranges - 1;
1241 1276
1242 1277 /* find the memranges index below contained in mnode range */
1243 1278
1244 1279 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1245 1280 mri--;
1246 1281
1247 1282 /*
1248 1283 * increment mnode range counter when memranges or mnode
1249 1284 * boundary is reached.
1250 1285 */
1251 1286 while (mri >= 0 &&
1252 1287 mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1253 1288 mnrcnt++;
1254 1289 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1255 1290 mri--;
1256 1291 else
1257 1292 break;
1258 1293 }
1259 1294 }
1260 1295 ASSERT(mnrcnt <= MAX_MNODE_MRANGES);
1261 1296 return (mnrcnt);
1262 1297 #endif /* __xpv */
1263 1298 }
1264 1299
1265 1300 /*
1266 1301 * mnode_range_setup() initializes mnoderanges.
1267 1302 */
1268 1303 void
1269 1304 mnode_range_setup(mnoderange_t *mnoderanges)
1270 1305 {
1271 1306 mnoderange_t *mp = mnoderanges;
1272 1307 int mnode, mri;
1273 1308 int mindex = 0; /* current index into mnoderanges array */
1274 1309 int i, j;
1275 1310 pfn_t hipfn;
1276 1311 int last, hi;
1277 1312
1278 1313 for (mnode = 0; mnode < max_mem_nodes; mnode++) {
1279 1314 if (mem_node_config[mnode].exists == 0)
1280 1315 continue;
1281 1316
1282 1317 mri = nranges - 1;
1283 1318
1284 1319 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1285 1320 mri--;
1286 1321
1287 1322 while (mri >= 0 && mem_node_config[mnode].physmax >=
1288 1323 MEMRANGELO(mri)) {
1289 1324 mnoderanges->mnr_pfnlo = MAX(MEMRANGELO(mri),
1290 1325 mem_node_config[mnode].physbase);
1291 1326 mnoderanges->mnr_pfnhi = MIN(MEMRANGEHI(mri),
1292 1327 mem_node_config[mnode].physmax);
1293 1328 mnoderanges->mnr_mnode = mnode;
1294 1329 mnoderanges->mnr_memrange = mri;
1295 1330 mnoderanges->mnr_exists = 1;
1296 1331 mnoderanges++;
1297 1332 mindex++;
1298 1333 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1299 1334 mri--;
1300 1335 else
1301 1336 break;
1302 1337 }
1303 1338 }
1304 1339
1305 1340 /*
1306 1341 * For now do a simple sort of the mnoderanges array to fill in
1307 1342 * the mnr_next fields. Since mindex is expected to be relatively
1308 1343 * small, using a simple O(N^2) algorithm.
1309 1344 */
1310 1345 for (i = 0; i < mindex; i++) {
1311 1346 if (mp[i].mnr_pfnlo == 0) /* find lowest */
1312 1347 break;
1313 1348 }
1314 1349 ASSERT(i < mindex);
1315 1350 last = i;
1316 1351 mtype16m = last;
1317 1352 mp[last].mnr_next = -1;
1318 1353 for (i = 0; i < mindex - 1; i++) {
1319 1354 hipfn = (pfn_t)(-1);
1320 1355 hi = -1;
1321 1356 /* find next highest mnode range */
1322 1357 for (j = 0; j < mindex; j++) {
1323 1358 if (mp[j].mnr_pfnlo > mp[last].mnr_pfnlo &&
1324 1359 mp[j].mnr_pfnlo < hipfn) {
1325 1360 hipfn = mp[j].mnr_pfnlo;
1326 1361 hi = j;
1327 1362 }
1328 1363 }
1329 1364 mp[hi].mnr_next = last;
1330 1365 last = hi;
1331 1366 }
1332 1367 mtypetop = last;
1333 1368 }
1334 1369
1335 1370 #ifndef __xpv
1336 1371 /*
1337 1372 * Update mnoderanges for memory hot-add DR operations.
1338 1373 */
1339 1374 static void
1340 1375 mnode_range_add(int mnode)
1341 1376 {
1342 1377 int *prev;
1343 1378 int n, mri;
1344 1379 pfn_t start, end;
1345 1380 extern void membar_sync(void);
1346 1381
1347 1382 ASSERT(0 <= mnode && mnode < max_mem_nodes);
1348 1383 ASSERT(mem_node_config[mnode].exists);
1349 1384 start = mem_node_config[mnode].physbase;
1350 1385 end = mem_node_config[mnode].physmax;
1351 1386 ASSERT(start <= end);
1352 1387 mutex_enter(&mnoderange_lock);
1353 1388
1354 1389 #ifdef DEBUG
1355 1390 /* Check whether it interleaves with other memory nodes. */
1356 1391 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1357 1392 ASSERT(mnoderanges[n].mnr_exists);
1358 1393 if (mnoderanges[n].mnr_mnode == mnode)
1359 1394 continue;
1360 1395 ASSERT(start > mnoderanges[n].mnr_pfnhi ||
1361 1396 end < mnoderanges[n].mnr_pfnlo);
1362 1397 }
1363 1398 #endif /* DEBUG */
1364 1399
1365 1400 mri = nranges - 1;
1366 1401 while (MEMRANGEHI(mri) < mem_node_config[mnode].physbase)
1367 1402 mri--;
1368 1403 while (mri >= 0 && mem_node_config[mnode].physmax >= MEMRANGELO(mri)) {
1369 1404 /* Check whether mtype already exists. */
1370 1405 for (n = mtypetop; n != -1; n = mnoderanges[n].mnr_next) {
1371 1406 if (mnoderanges[n].mnr_mnode == mnode &&
1372 1407 mnoderanges[n].mnr_memrange == mri) {
1373 1408 mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri),
1374 1409 start);
1375 1410 mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri),
1376 1411 end);
1377 1412 break;
1378 1413 }
1379 1414 }
1380 1415
1381 1416 /* Add a new entry if it doesn't exist yet. */
1382 1417 if (n == -1) {
1383 1418 /* Try to find an unused entry in mnoderanges array. */
1384 1419 for (n = 0; n < mnoderangecnt; n++) {
1385 1420 if (mnoderanges[n].mnr_exists == 0)
1386 1421 break;
1387 1422 }
1388 1423 ASSERT(n < mnoderangecnt);
1389 1424 mnoderanges[n].mnr_pfnlo = MAX(MEMRANGELO(mri), start);
1390 1425 mnoderanges[n].mnr_pfnhi = MIN(MEMRANGEHI(mri), end);
1391 1426 mnoderanges[n].mnr_mnode = mnode;
1392 1427 mnoderanges[n].mnr_memrange = mri;
1393 1428 mnoderanges[n].mnr_exists = 1;
1394 1429 /* Page 0 should always be present. */
1395 1430 for (prev = &mtypetop;
1396 1431 mnoderanges[*prev].mnr_pfnlo > start;
1397 1432 prev = &mnoderanges[*prev].mnr_next) {
1398 1433 ASSERT(mnoderanges[*prev].mnr_next >= 0);
1399 1434 ASSERT(mnoderanges[*prev].mnr_pfnlo > end);
1400 1435 }
1401 1436 mnoderanges[n].mnr_next = *prev;
1402 1437 membar_sync();
1403 1438 *prev = n;
1404 1439 }
1405 1440
1406 1441 if (mem_node_config[mnode].physmax > MEMRANGEHI(mri))
1407 1442 mri--;
1408 1443 else
1409 1444 break;
1410 1445 }
1411 1446
1412 1447 mutex_exit(&mnoderange_lock);
1413 1448 }
1414 1449
1415 1450 /*
1416 1451 * Update mnoderanges for memory hot-removal DR operations.
1417 1452 */
1418 1453 static void
1419 1454 mnode_range_del(int mnode)
1420 1455 {
1421 1456 _NOTE(ARGUNUSED(mnode));
1422 1457 ASSERT(0 <= mnode && mnode < max_mem_nodes);
1423 1458 /* TODO: support deletion operation. */
1424 1459 ASSERT(0);
1425 1460 }
1426 1461
1427 1462 void
1428 1463 plat_slice_add(pfn_t start, pfn_t end)
1429 1464 {
1430 1465 mem_node_add_slice(start, end);
1431 1466 if (plat_dr_enabled()) {
1432 1467 mnode_range_add(PFN_2_MEM_NODE(start));
1433 1468 }
1434 1469 }
1435 1470
1436 1471 void
1437 1472 plat_slice_del(pfn_t start, pfn_t end)
1438 1473 {
1439 1474 ASSERT(PFN_2_MEM_NODE(start) == PFN_2_MEM_NODE(end));
1440 1475 ASSERT(plat_dr_enabled());
1441 1476 mnode_range_del(PFN_2_MEM_NODE(start));
1442 1477 mem_node_del_slice(start, end);
1443 1478 }
1444 1479 #endif /* __xpv */
1445 1480
1446 1481 /*ARGSUSED*/
1447 1482 int
1448 1483 mtype_init(vnode_t *vp, caddr_t vaddr, uint_t *flags, size_t pgsz)
1449 1484 {
1450 1485 int mtype = mtypetop;
1451 1486
1452 1487 #if !defined(__xpv)
1453 1488 #if defined(__i386)
1454 1489 /*
1455 1490 * set the mtype range
1456 1491 * - kmem requests need to be below 4g if restricted_kmemalloc is set.
1457 1492 * - for non kmem requests, set range to above 4g if memory below 4g
1458 1493 * runs low.
1459 1494 */
1460 1495 if (restricted_kmemalloc && VN_ISKAS(vp) &&
1461 1496 (caddr_t)(vaddr) >= kernelheap &&
1462 1497 (caddr_t)(vaddr) < ekernelheap) {
1463 1498 ASSERT(physmax4g);
1464 1499 mtype = mtype4g;
1465 1500 if (RESTRICT16M_ALLOC(freemem4g - btop(pgsz),
1466 1501 btop(pgsz), *flags)) {
1467 1502 *flags |= PGI_MT_RANGE16M;
1468 1503 } else {
1469 1504 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1470 1505 VM_STAT_COND_ADD((*flags & PG_PANIC),
1471 1506 vmm_vmstats.pgpanicalloc);
1472 1507 *flags |= PGI_MT_RANGE0;
1473 1508 }
1474 1509 return (mtype);
1475 1510 }
1476 1511 #endif /* __i386 */
1477 1512
1478 1513 if (RESTRICT4G_ALLOC) {
1479 1514 VM_STAT_ADD(vmm_vmstats.restrict4gcnt);
1480 1515 /* here only for > 4g systems */
1481 1516 *flags |= PGI_MT_RANGE4G;
1482 1517 } else if (RESTRICT16M_ALLOC(freemem, btop(pgsz), *flags)) {
1483 1518 *flags |= PGI_MT_RANGE16M;
1484 1519 } else {
1485 1520 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1486 1521 VM_STAT_COND_ADD((*flags & PG_PANIC), vmm_vmstats.pgpanicalloc);
1487 1522 *flags |= PGI_MT_RANGE0;
1488 1523 }
1489 1524 #endif /* !__xpv */
1490 1525 return (mtype);
1491 1526 }
1492 1527
1493 1528
1494 1529 /* mtype init for page_get_replacement_page */
1495 1530 /*ARGSUSED*/
1496 1531 int
1497 1532 mtype_pgr_init(int *flags, page_t *pp, int mnode, pgcnt_t pgcnt)
1498 1533 {
1499 1534 int mtype = mtypetop;
1500 1535 #if !defined(__xpv)
1501 1536 if (RESTRICT16M_ALLOC(freemem, pgcnt, *flags)) {
1502 1537 *flags |= PGI_MT_RANGE16M;
1503 1538 } else {
1504 1539 VM_STAT_ADD(vmm_vmstats.unrestrict16mcnt);
1505 1540 *flags |= PGI_MT_RANGE0;
1506 1541 }
1507 1542 #endif
1508 1543 return (mtype);
1509 1544 }
1510 1545
1511 1546 /*
1512 1547 * Determine if the mnode range specified in mtype contains memory belonging
1513 1548 * to memory node mnode. If flags & PGI_MT_RANGE is set then mtype contains
1514 1549 * the range from high pfn to 0, 16m or 4g.
1515 1550 *
1516 1551 * Return first mnode range type index found otherwise return -1 if none found.
1517 1552 */
1518 1553 int
1519 1554 mtype_func(int mnode, int mtype, uint_t flags)
1520 1555 {
1521 1556 if (flags & PGI_MT_RANGE) {
1522 1557 int mnr_lim = MRI_0;
1523 1558
1524 1559 if (flags & PGI_MT_NEXT) {
1525 1560 mtype = mnoderanges[mtype].mnr_next;
1526 1561 }
1527 1562 if (flags & PGI_MT_RANGE4G)
1528 1563 mnr_lim = MRI_4G; /* exclude 0-4g range */
1529 1564 else if (flags & PGI_MT_RANGE16M)
1530 1565 mnr_lim = MRI_16M; /* exclude 0-16m range */
1531 1566 while (mtype != -1 &&
1532 1567 mnoderanges[mtype].mnr_memrange <= mnr_lim) {
1533 1568 if (mnoderanges[mtype].mnr_mnode == mnode)
1534 1569 return (mtype);
1535 1570 mtype = mnoderanges[mtype].mnr_next;
1536 1571 }
1537 1572 } else if (mnoderanges[mtype].mnr_mnode == mnode) {
1538 1573 return (mtype);
1539 1574 }
1540 1575 return (-1);
1541 1576 }
1542 1577
1543 1578 /*
1544 1579 * Update the page list max counts with the pfn range specified by the
1545 1580 * input parameters.
1546 1581 */
1547 1582 void
1548 1583 mtype_modify_max(pfn_t startpfn, long cnt)
1549 1584 {
1550 1585 int mtype;
1551 1586 pgcnt_t inc;
1552 1587 spgcnt_t scnt = (spgcnt_t)(cnt);
1553 1588 pgcnt_t acnt = ABS(scnt);
1554 1589 pfn_t endpfn = startpfn + acnt;
1555 1590 pfn_t pfn, lo;
1556 1591
1557 1592 if (!physmax4g)
1558 1593 return;
1559 1594
1560 1595 mtype = mtypetop;
1561 1596 for (pfn = endpfn; pfn > startpfn; ) {
1562 1597 ASSERT(mtype != -1);
1563 1598 lo = mnoderanges[mtype].mnr_pfnlo;
1564 1599 if (pfn > lo) {
1565 1600 if (startpfn >= lo) {
1566 1601 inc = pfn - startpfn;
1567 1602 } else {
1568 1603 inc = pfn - lo;
1569 1604 }
1570 1605 if (mnoderanges[mtype].mnr_memrange != MRI_4G) {
1571 1606 if (scnt > 0)
1572 1607 maxmem4g += inc;
1573 1608 else
1574 1609 maxmem4g -= inc;
1575 1610 }
1576 1611 pfn -= inc;
1577 1612 }
1578 1613 mtype = mnoderanges[mtype].mnr_next;
1579 1614 }
1580 1615 }
1581 1616
1582 1617 int
1583 1618 mtype_2_mrange(int mtype)
1584 1619 {
1585 1620 return (mnoderanges[mtype].mnr_memrange);
1586 1621 }
1587 1622
1588 1623 void
1589 1624 mnodetype_2_pfn(int mnode, int mtype, pfn_t *pfnlo, pfn_t *pfnhi)
1590 1625 {
1591 1626 _NOTE(ARGUNUSED(mnode));
1592 1627 ASSERT(mnoderanges[mtype].mnr_mnode == mnode);
1593 1628 *pfnlo = mnoderanges[mtype].mnr_pfnlo;
1594 1629 *pfnhi = mnoderanges[mtype].mnr_pfnhi;
1595 1630 }
1596 1631
1597 1632 size_t
1598 1633 plcnt_sz(size_t ctrs_sz)
1599 1634 {
1600 1635 #ifdef DEBUG
1601 1636 int szc, colors;
1602 1637
1603 1638 ctrs_sz += mnoderangecnt * sizeof (struct mnr_mts) * mmu_page_sizes;
1604 1639 for (szc = 0; szc < mmu_page_sizes; szc++) {
1605 1640 colors = page_get_pagecolors(szc);
1606 1641 ctrs_sz += mnoderangecnt * sizeof (pgcnt_t) * colors;
1607 1642 }
1608 1643 #endif
1609 1644 return (ctrs_sz);
1610 1645 }
1611 1646
1612 1647 caddr_t
1613 1648 plcnt_init(caddr_t addr)
1614 1649 {
1615 1650 #ifdef DEBUG
1616 1651 int mt, szc, colors;
1617 1652
1618 1653 for (mt = 0; mt < mnoderangecnt; mt++) {
1619 1654 mnoderanges[mt].mnr_mts = (struct mnr_mts *)addr;
1620 1655 addr += (sizeof (struct mnr_mts) * mmu_page_sizes);
1621 1656 for (szc = 0; szc < mmu_page_sizes; szc++) {
1622 1657 colors = page_get_pagecolors(szc);
1623 1658 mnoderanges[mt].mnr_mts[szc].mnr_mts_colors = colors;
1624 1659 mnoderanges[mt].mnr_mts[szc].mnr_mtsc_pgcnt =
1625 1660 (pgcnt_t *)addr;
1626 1661 addr += (sizeof (pgcnt_t) * colors);
1627 1662 }
1628 1663 }
1629 1664 #endif
1630 1665 return (addr);
1631 1666 }
1632 1667
1633 1668 void
1634 1669 plcnt_inc_dec(page_t *pp, int mtype, int szc, long cnt, int flags)
1635 1670 {
1636 1671 _NOTE(ARGUNUSED(pp));
1637 1672 #ifdef DEBUG
1638 1673 int bin = PP_2_BIN(pp);
1639 1674
1640 1675 atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mts_pgcnt, cnt);
1641 1676 atomic_add_long(&mnoderanges[mtype].mnr_mts[szc].mnr_mtsc_pgcnt[bin],
1642 1677 cnt);
1643 1678 #endif
1644 1679 ASSERT(mtype == PP_2_MTYPE(pp));
1645 1680 if (physmax4g && mnoderanges[mtype].mnr_memrange != MRI_4G)
1646 1681 atomic_add_long(&freemem4g, cnt);
1647 1682 if (flags & PG_CACHE_LIST)
1648 1683 atomic_add_long(&mnoderanges[mtype].mnr_mt_clpgcnt, cnt);
1649 1684 else
1650 1685 atomic_add_long(&mnoderanges[mtype].mnr_mt_flpgcnt[szc], cnt);
1651 1686 atomic_add_long(&mnoderanges[mtype].mnr_mt_totcnt, cnt);
1652 1687 }
1653 1688
1654 1689 /*
1655 1690 * Returns the free page count for mnode
1656 1691 */
1657 1692 int
1658 1693 mnode_pgcnt(int mnode)
1659 1694 {
1660 1695 int mtype = mtypetop;
1661 1696 int flags = PGI_MT_RANGE0;
1662 1697 pgcnt_t pgcnt = 0;
1663 1698
1664 1699 mtype = mtype_func(mnode, mtype, flags);
1665 1700
1666 1701 while (mtype != -1) {
1667 1702 pgcnt += MTYPE_FREEMEM(mtype);
1668 1703 mtype = mtype_func(mnode, mtype, flags | PGI_MT_NEXT);
1669 1704 }
1670 1705 return (pgcnt);
1671 1706 }
1672 1707
1673 1708 /*
1674 1709 * Initialize page coloring variables based on the l2 cache parameters.
1675 1710 * Calculate and return memory needed for page coloring data structures.
1676 1711 */
1677 1712 size_t
1678 1713 page_coloring_init(uint_t l2_sz, int l2_linesz, int l2_assoc)
1679 1714 {
1680 1715 _NOTE(ARGUNUSED(l2_linesz));
1681 1716 size_t colorsz = 0;
1682 1717 int i;
1683 1718 int colors;
1684 1719
1685 1720 #if defined(__xpv)
1686 1721 /*
1687 1722 * Hypervisor domains currently don't have any concept of NUMA.
1688 1723 * Hence we'll act like there is only 1 memrange.
1689 1724 */
1690 1725 i = memrange_num(1);
1691 1726 #else /* !__xpv */
1692 1727 /*
1693 1728 * Reduce the memory ranges lists if we don't have large amounts
1694 1729 * of memory. This avoids searching known empty free lists.
1695 1730 * To support memory DR operations, we need to keep memory ranges
1696 1731 * for possible memory hot-add operations.
1697 1732 */
1698 1733 if (plat_dr_physmax > physmax)
1699 1734 i = memrange_num(plat_dr_physmax);
1700 1735 else
1701 1736 i = memrange_num(physmax);
1702 1737 #if defined(__i386)
1703 1738 if (i > MRI_4G)
1704 1739 restricted_kmemalloc = 0;
1705 1740 #endif
1706 1741 /* physmax greater than 4g */
1707 1742 if (i == MRI_4G)
1708 1743 physmax4g = 1;
1709 1744 #endif /* !__xpv */
1710 1745 memranges += i;
1711 1746 nranges -= i;
1712 1747
1713 1748 ASSERT(mmu_page_sizes <= MMU_PAGE_SIZES);
1714 1749
1715 1750 ASSERT(ISP2(l2_linesz));
1716 1751 ASSERT(l2_sz > MMU_PAGESIZE);
1717 1752
1718 1753 /* l2_assoc is 0 for fully associative l2 cache */
1719 1754 if (l2_assoc)
1720 1755 l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
1721 1756 else
1722 1757 l2_colors = 1;
1723 1758
1724 1759 ASSERT(ISP2(l2_colors));
1725 1760
1726 1761 /* for scalability, configure at least PAGE_COLORS_MIN color bins */
1727 1762 page_colors = MAX(l2_colors, PAGE_COLORS_MIN);
1728 1763
1729 1764 /*
1730 1765 * cpu_page_colors is non-zero when a page color may be spread across
1731 1766 * multiple bins.
1732 1767 */
1733 1768 if (l2_colors < page_colors)
1734 1769 cpu_page_colors = l2_colors;
1735 1770
1736 1771 ASSERT(ISP2(page_colors));
1737 1772
1738 1773 page_colors_mask = page_colors - 1;
1739 1774
1740 1775 ASSERT(ISP2(CPUSETSIZE()));
1741 1776 page_coloring_shift = lowbit(CPUSETSIZE());
1742 1777
1743 1778 /* initialize number of colors per page size */
1744 1779 for (i = 0; i <= mmu.max_page_level; i++) {
1745 1780 hw_page_array[i].hp_size = LEVEL_SIZE(i);
1746 1781 hw_page_array[i].hp_shift = LEVEL_SHIFT(i);
1747 1782 hw_page_array[i].hp_pgcnt = LEVEL_SIZE(i) >> LEVEL_SHIFT(0);
1748 1783 hw_page_array[i].hp_colors = (page_colors_mask >>
1749 1784 (hw_page_array[i].hp_shift - hw_page_array[0].hp_shift))
1750 1785 + 1;
1751 1786 colorequivszc[i] = 0;
1752 1787 }
1753 1788
1754 1789 /*
1755 1790 * The value of cpu_page_colors determines if additional color bins
1756 1791 * need to be checked for a particular color in the page_get routines.
1757 1792 */
1758 1793 if (cpu_page_colors != 0) {
1759 1794
1760 1795 int a = lowbit(page_colors) - lowbit(cpu_page_colors);
1761 1796 ASSERT(a > 0);
1762 1797 ASSERT(a < 16);
1763 1798
1764 1799 for (i = 0; i <= mmu.max_page_level; i++) {
1765 1800 if ((colors = hw_page_array[i].hp_colors) <= 1) {
1766 1801 colorequivszc[i] = 0;
1767 1802 continue;
1768 1803 }
1769 1804 while ((colors >> a) == 0)
1770 1805 a--;
1771 1806 ASSERT(a >= 0);
1772 1807
1773 1808 			/* higher 4 bits encode the color equiv mask */
1774 1809 colorequivszc[i] = (a << 4);
1775 1810 }
1776 1811 }
1777 1812
1778 1813 /* factor in colorequiv to check additional 'equivalent' bins. */
1779 1814 if (colorequiv > 1) {
1780 1815
1781 1816 int a = lowbit(colorequiv) - 1;
1782 1817 if (a > 15)
1783 1818 a = 15;
1784 1819
1785 1820 for (i = 0; i <= mmu.max_page_level; i++) {
1786 1821 if ((colors = hw_page_array[i].hp_colors) <= 1) {
1787 1822 continue;
1788 1823 }
1789 1824 while ((colors >> a) == 0)
1790 1825 a--;
1791 1826 if ((a << 4) > colorequivszc[i]) {
1792 1827 colorequivszc[i] = (a << 4);
1793 1828 }
1794 1829 }
1795 1830 }
1796 1831
1797 1832 /* size for mnoderanges */
1798 1833 for (mnoderangecnt = 0, i = 0; i < max_mem_nodes; i++)
1799 1834 mnoderangecnt += mnode_range_cnt(i);
1800 1835 if (plat_dr_support_memory()) {
1801 1836 /*
1802 1837 * Reserve enough space for memory DR operations.
1803 1838 		 * Two extra mnoderanges for possible fragmentation,
1804 1839 * one for the 2G boundary and the other for the 4G boundary.
1805 1840 * We don't expect a memory board crossing the 16M boundary
1806 1841 * for memory hot-add operations on x86 platforms.
1807 1842 */
1808 1843 mnoderangecnt += 2 + max_mem_nodes - lgrp_plat_node_cnt;
1809 1844 }
1810 1845 colorsz = mnoderangecnt * sizeof (mnoderange_t);
1811 1846
1812 1847 /* size for fpc_mutex and cpc_mutex */
1813 1848 colorsz += (2 * max_mem_nodes * sizeof (kmutex_t) * NPC_MUTEX);
1814 1849
1815 1850 /* size of page_freelists */
1816 1851 colorsz += mnoderangecnt * sizeof (page_t ***);
1817 1852 colorsz += mnoderangecnt * mmu_page_sizes * sizeof (page_t **);
1818 1853
1819 1854 for (i = 0; i < mmu_page_sizes; i++) {
1820 1855 colors = page_get_pagecolors(i);
1821 1856 colorsz += mnoderangecnt * colors * sizeof (page_t *);
1822 1857 }
1823 1858
1824 1859 /* size of page_cachelists */
1825 1860 colorsz += mnoderangecnt * sizeof (page_t **);
1826 1861 colorsz += mnoderangecnt * page_colors * sizeof (page_t *);
1827 1862
1828 1863 return (colorsz);
1829 1864 }
1830 1865
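As an illustration of the L2-derived color arithmetic in page_coloring_init() above, the small user-level sketch below reproduces the l2_colors/page_colors computation; the 2 MB 8-way cache, the 4 KB MMU_PAGESIZE and the PAGE_COLORS_MIN of 16 are assumed example values, not taken from this change.

	#include <stdio.h>

	#define	MMU_PAGESIZE	4096		/* assumed 4 KB base page */
	#define	PAGE_COLORS_MIN	16		/* assumed minimum, for illustration */
	#define	MAX(a, b)	((a) > (b) ? (a) : (b))

	int
	main(void)
	{
		unsigned int l2_sz = 2 * 1024 * 1024;	/* assumed 2 MB L2 */
		int l2_assoc = 8;			/* assumed 8-way set associative */
		unsigned int l2_colors, page_colors;

		/* same arithmetic as the set-associative branch above */
		l2_colors = MAX(1, l2_sz / (l2_assoc * MMU_PAGESIZE));
		page_colors = MAX(l2_colors, PAGE_COLORS_MIN);

		/* prints: l2_colors=64 page_colors=64 */
		printf("l2_colors=%u page_colors=%u\n", l2_colors, page_colors);
		return (0);
	}
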
1831 1866 /*
1832 1867 * Called once at startup to configure page_coloring data structures and
1833 1868  * do the 1st page_free()/page_freelist_add().
1834 1869 */
1835 1870 void
1836 1871 page_coloring_setup(caddr_t pcmemaddr)
1837 1872 {
1838 1873 int i;
1839 1874 int j;
1840 1875 int k;
1841 1876 caddr_t addr;
1842 1877 int colors;
1843 1878
1844 1879 /*
1845 1880 * do page coloring setup
1846 1881 */
1847 1882 addr = pcmemaddr;
1848 1883
1849 1884 mnoderanges = (mnoderange_t *)addr;
1850 1885 addr += (mnoderangecnt * sizeof (mnoderange_t));
1851 1886
1852 1887 mnode_range_setup(mnoderanges);
1853 1888
1854 1889 if (physmax4g)
1855 1890 mtype4g = pfn_2_mtype(0xfffff);
1856 1891
1857 1892 for (k = 0; k < NPC_MUTEX; k++) {
1858 1893 fpc_mutex[k] = (kmutex_t *)addr;
1859 1894 addr += (max_mem_nodes * sizeof (kmutex_t));
1860 1895 }
1861 1896 for (k = 0; k < NPC_MUTEX; k++) {
1862 1897 cpc_mutex[k] = (kmutex_t *)addr;
1863 1898 addr += (max_mem_nodes * sizeof (kmutex_t));
1864 1899 }
1865 1900 page_freelists = (page_t ****)addr;
1866 1901 addr += (mnoderangecnt * sizeof (page_t ***));
1867 1902
1868 1903 page_cachelists = (page_t ***)addr;
1869 1904 addr += (mnoderangecnt * sizeof (page_t **));
1870 1905
1871 1906 for (i = 0; i < mnoderangecnt; i++) {
1872 1907 page_freelists[i] = (page_t ***)addr;
1873 1908 addr += (mmu_page_sizes * sizeof (page_t **));
1874 1909
1875 1910 for (j = 0; j < mmu_page_sizes; j++) {
1876 1911 colors = page_get_pagecolors(j);
1877 1912 page_freelists[i][j] = (page_t **)addr;
1878 1913 addr += (colors * sizeof (page_t *));
1879 1914 }
1880 1915 page_cachelists[i] = (page_t **)addr;
1881 1916 addr += (page_colors * sizeof (page_t *));
1882 1917 }
1883 1918 }
1884 1919
1885 1920 #if defined(__xpv)
1886 1921 /*
1887 1922 * Give back 10% of the io_pool pages to the free list.
1888 1923 * Don't shrink the pool below some absolute minimum.
1889 1924 */
1890 1925 static void
1891 1926 page_io_pool_shrink()
1892 1927 {
1893 1928 int retcnt;
1894 1929 page_t *pp, *pp_first, *pp_last, **curpool;
1895 1930 mfn_t mfn;
1896 1931 int bothpools = 0;
1897 1932
1898 1933 mutex_enter(&io_pool_lock);
1899 1934 io_pool_shrink_attempts++; /* should be a kstat? */
1900 1935 retcnt = io_pool_cnt / 10;
1901 1936 if (io_pool_cnt - retcnt < io_pool_cnt_min)
1902 1937 retcnt = io_pool_cnt - io_pool_cnt_min;
1903 1938 if (retcnt <= 0)
1904 1939 goto done;
1905 1940 io_pool_shrinks++; /* should be a kstat? */
1906 1941 curpool = &io_pool_4g;
1907 1942 domore:
1908 1943 /*
1909 1944 * Loop through taking pages from the end of the list
1910 1945 * (highest mfns) till amount to return reached.
1911 1946 */
1912 1947 for (pp = *curpool; pp && retcnt > 0; ) {
1913 1948 pp_first = pp_last = pp->p_prev;
1914 1949 if (pp_first == *curpool)
1915 1950 break;
1916 1951 retcnt--;
1917 1952 io_pool_cnt--;
1918 1953 page_io_pool_sub(curpool, pp_first, pp_last);
1919 1954 if ((mfn = pfn_to_mfn(pp->p_pagenum)) < start_mfn)
1920 1955 start_mfn = mfn;
1921 1956 page_free(pp_first, 1);
1922 1957 pp = *curpool;
1923 1958 }
1924 1959 if (retcnt != 0 && !bothpools) {
1925 1960 /*
1926 1961 * If not enough found in less constrained pool try the
1927 1962 * more constrained one.
1928 1963 */
1929 1964 curpool = &io_pool_16m;
1930 1965 bothpools = 1;
1931 1966 goto domore;
1932 1967 }
1933 1968 done:
1934 1969 mutex_exit(&io_pool_lock);
1935 1970 }
1936 1971
1937 1972 #endif /* __xpv */
1938 1973
1939 1974 uint_t
1940 1975 page_create_update_flags_x86(uint_t flags)
1941 1976 {
1942 1977 #if defined(__xpv)
1943 1978 /*
1944 1979 	 * Check whether this is an urgent allocation and free pages are depleted.
1945 1980 */
1946 1981 if (!(flags & PG_WAIT) && freemem < desfree)
1947 1982 page_io_pool_shrink();
1948 1983 #else /* !__xpv */
1949 1984 /*
1950 1985 * page_create_get_something may call this because 4g memory may be
1951 1986 * depleted. Set flags to allow for relocation of base page below
1952 1987 * 4g if necessary.
1953 1988 */
1954 1989 if (physmax4g)
1955 1990 flags |= (PGI_PGCPSZC0 | PGI_PGCPHIPRI);
1956 1991 #endif /* __xpv */
1957 1992 return (flags);
1958 1993 }
1959 1994
1960 1995 /*ARGSUSED*/
1961 1996 int
1962 1997 bp_color(struct buf *bp)
1963 1998 {
1964 1999 return (0);
1965 2000 }
1966 2001
1967 2002 #if defined(__xpv)
1968 2003
1969 2004 /*
1970 2005 * Take pages out of an io_pool
1971 2006 */
1972 2007 static void
1973 2008 page_io_pool_sub(page_t **poolp, page_t *pp_first, page_t *pp_last)
1974 2009 {
1975 2010 if (*poolp == pp_first) {
1976 2011 *poolp = pp_last->p_next;
1977 2012 if (*poolp == pp_first)
1978 2013 *poolp = NULL;
1979 2014 }
1980 2015 pp_first->p_prev->p_next = pp_last->p_next;
1981 2016 pp_last->p_next->p_prev = pp_first->p_prev;
1982 2017 pp_first->p_prev = pp_last;
1983 2018 pp_last->p_next = pp_first;
1984 2019 }
1985 2020
1986 2021 /*
1987 2022 * Put a page on the io_pool list. The list is ordered by increasing MFN.
1988 2023 */
1989 2024 static void
1990 2025 page_io_pool_add(page_t **poolp, page_t *pp)
1991 2026 {
1992 2027 page_t *look;
1993 2028 mfn_t mfn = mfn_list[pp->p_pagenum];
1994 2029
1995 2030 if (*poolp == NULL) {
1996 2031 *poolp = pp;
1997 2032 pp->p_next = pp;
1998 2033 pp->p_prev = pp;
1999 2034 return;
2000 2035 }
2001 2036
2002 2037 /*
2003 2038 * Since we try to take pages from the high end of the pool
2004 2039 * chances are good that the pages to be put on the list will
2005 2040 	 * go at or near the end of the list, so start at the end and
2006 2041 * work backwards.
2007 2042 */
2008 2043 look = (*poolp)->p_prev;
2009 2044 while (mfn < mfn_list[look->p_pagenum]) {
2010 2045 look = look->p_prev;
2011 2046 if (look == (*poolp)->p_prev)
2012 2047 break; /* backed all the way to front of list */
2013 2048 }
2014 2049
2015 2050 /* insert after look */
2016 2051 pp->p_prev = look;
2017 2052 pp->p_next = look->p_next;
2018 2053 pp->p_next->p_prev = pp;
2019 2054 look->p_next = pp;
2020 2055 if (mfn < mfn_list[(*poolp)->p_pagenum]) {
2021 2056 /*
2022 2057 * we inserted a new first list element
2023 2058 * adjust pool pointer to newly inserted element
2024 2059 */
2025 2060 *poolp = pp;
2026 2061 }
2027 2062 }
2028 2063
2029 2064 /*
2030 2065 * Add a page to the io_pool. Setting the force flag will force the page
2031 2066 * into the io_pool no matter what.
2032 2067 */
2033 2068 static void
2034 2069 add_page_to_pool(page_t *pp, int force)
2035 2070 {
2036 2071 page_t *highest;
2037 2072 page_t *freep = NULL;
2038 2073
2039 2074 mutex_enter(&io_pool_lock);
2040 2075 /*
2041 2076 * Always keep the scarce low memory pages
2042 2077 */
2043 2078 if (mfn_list[pp->p_pagenum] < PFN_16MEG) {
2044 2079 ++io_pool_cnt;
2045 2080 page_io_pool_add(&io_pool_16m, pp);
2046 2081 goto done;
2047 2082 }
2048 2083 if (io_pool_cnt < io_pool_cnt_max || force || io_pool_4g == NULL) {
2049 2084 ++io_pool_cnt;
2050 2085 page_io_pool_add(&io_pool_4g, pp);
2051 2086 } else {
2052 2087 highest = io_pool_4g->p_prev;
2053 2088 if (mfn_list[pp->p_pagenum] < mfn_list[highest->p_pagenum]) {
2054 2089 page_io_pool_sub(&io_pool_4g, highest, highest);
2055 2090 page_io_pool_add(&io_pool_4g, pp);
2056 2091 freep = highest;
2057 2092 } else {
2058 2093 freep = pp;
2059 2094 }
2060 2095 }
2061 2096 done:
2062 2097 mutex_exit(&io_pool_lock);
2063 2098 if (freep)
2064 2099 page_free(freep, 1);
2065 2100 }
2066 2101
2067 2102
2068 2103 int contig_pfn_cnt; /* no of pfns in the contig pfn list */
2069 2104 int contig_pfn_max; /* capacity of the contig pfn list */
2070 2105 int next_alloc_pfn; /* next position in list to start a contig search */
2071 2106 int contig_pfnlist_updates; /* pfn list update count */
2072 2107 int contig_pfnlist_builds; /* how many times have we (re)built list */
2073 2108 int contig_pfnlist_buildfailed; /* how many times has list build failed */
2074 2109 int create_contig_pending; /* nonzero means taskq creating contig list */
2075 2110 pfn_t *contig_pfn_list = NULL; /* list of contig pfns in ascending mfn order */
2076 2111
2077 2112 /*
2078 2113 * Function to use in sorting a list of pfns by their underlying mfns.
2079 2114 */
2080 2115 static int
2081 2116 mfn_compare(const void *pfnp1, const void *pfnp2)
2082 2117 {
2083 2118 mfn_t mfn1 = mfn_list[*(pfn_t *)pfnp1];
2084 2119 mfn_t mfn2 = mfn_list[*(pfn_t *)pfnp2];
2085 2120
2086 2121 if (mfn1 > mfn2)
2087 2122 return (1);
2088 2123 if (mfn1 < mfn2)
2089 2124 return (-1);
2090 2125 return (0);
2091 2126 }
2092 2127
2093 2128 /*
2094 2129 * Compact the contig_pfn_list by tossing all the non-contiguous
2095 2130 * elements from the list.
2096 2131 */
2097 2132 static void
2098 2133 compact_contig_pfn_list(void)
2099 2134 {
2100 2135 pfn_t pfn, lapfn, prev_lapfn;
2101 2136 mfn_t mfn;
2102 2137 int i, newcnt = 0;
2103 2138
2104 2139 prev_lapfn = 0;
2105 2140 for (i = 0; i < contig_pfn_cnt - 1; i++) {
2106 2141 pfn = contig_pfn_list[i];
2107 2142 lapfn = contig_pfn_list[i + 1];
2108 2143 mfn = mfn_list[pfn];
2109 2144 /*
2110 2145 * See if next pfn is for a contig mfn
2111 2146 */
2112 2147 if (mfn_list[lapfn] != mfn + 1)
2113 2148 continue;
2114 2149 /*
2115 2150 * pfn and lookahead are both put in list
2116 2151 * unless pfn is the previous lookahead.
2117 2152 */
2118 2153 if (pfn != prev_lapfn)
2119 2154 contig_pfn_list[newcnt++] = pfn;
2120 2155 contig_pfn_list[newcnt++] = lapfn;
2121 2156 prev_lapfn = lapfn;
2122 2157 }
2123 2158 for (i = newcnt; i < contig_pfn_cnt; i++)
2124 2159 contig_pfn_list[i] = 0;
2125 2160 contig_pfn_cnt = newcnt;
2126 2161 }
2127 2162
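To make the compaction rule above concrete: if the mfn-sorted list covers underlying mfns 5, 6, 8, 10, 11 and 12, only the contiguous runs {5,6} and {10,11,12} survive, so the count drops from 6 to 5. The user-level sketch below applies the same pair-wise rule directly to an array of mfns (the real code goes through contig_pfn_list and mfn_list); the values are made up for illustration.

	#include <stdio.h>

	int
	main(void)
	{
		/* underlying mfns of an already mfn-sorted list */
		unsigned long mfn[] = { 5, 6, 8, 10, 11, 12 };
		unsigned long keep[6];
		long prev_la = -1;
		int cnt = 6, newcnt = 0, i;

		for (i = 0; i < cnt - 1; i++) {
			if (mfn[i + 1] != mfn[i] + 1)
				continue;		/* not a contiguous pair */
			if ((long)mfn[i] != prev_la)
				keep[newcnt++] = mfn[i];
			keep[newcnt++] = mfn[i + 1];	/* lookahead is always kept */
			prev_la = (long)mfn[i + 1];
		}
		for (i = 0; i < newcnt; i++)
			printf("%lu ", keep[i]);	/* prints: 5 6 10 11 12 */
		printf("\n");
		return (0);
	}
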
2128 2163 /*ARGSUSED*/
2129 2164 static void
2130 2165 call_create_contiglist(void *arg)
2131 2166 {
2132 2167 (void) create_contig_pfnlist(PG_WAIT);
2133 2168 }
2134 2169
2135 2170 /*
2136 2171 * Create list of freelist pfns that have underlying
2137 2172 * contiguous mfns. The list is kept in ascending mfn order.
2138 2173  * Returns 1 if the list was created, else 0.
2139 2174 */
2140 2175 static int
2141 2176 create_contig_pfnlist(uint_t flags)
2142 2177 {
2143 2178 pfn_t pfn;
2144 2179 page_t *pp;
2145 2180 int ret = 1;
2146 2181
2147 2182 mutex_enter(&contig_list_lock);
2148 2183 if (contig_pfn_list != NULL)
2149 2184 goto out;
2150 2185 contig_pfn_max = freemem + (freemem / 10);
2151 2186 contig_pfn_list = kmem_zalloc(contig_pfn_max * sizeof (pfn_t),
2152 2187 (flags & PG_WAIT) ? KM_SLEEP : KM_NOSLEEP);
2153 2188 if (contig_pfn_list == NULL) {
2154 2189 /*
2155 2190 * If we could not create the contig list (because
2156 2191 		 * we could not sleep for memory), dispatch a taskq that can
2157 2192 * sleep to get the memory.
2158 2193 */
2159 2194 if (!create_contig_pending) {
2160 2195 if (taskq_dispatch(system_taskq, call_create_contiglist,
2161 2196 NULL, TQ_NOSLEEP) != NULL)
2162 2197 create_contig_pending = 1;
2163 2198 }
2164 2199 contig_pfnlist_buildfailed++; /* count list build failures */
2165 2200 ret = 0;
2166 2201 goto out;
2167 2202 }
2168 2203 create_contig_pending = 0;
2169 2204 ASSERT(contig_pfn_cnt == 0);
2170 2205 for (pfn = 0; pfn < mfn_count; pfn++) {
2171 2206 pp = page_numtopp_nolock(pfn);
2172 2207 if (pp == NULL || !PP_ISFREE(pp))
2173 2208 continue;
2174 2209 contig_pfn_list[contig_pfn_cnt] = pfn;
2175 2210 if (++contig_pfn_cnt == contig_pfn_max)
2176 2211 break;
2177 2212 }
2178 2213 /*
2179 2214 * Sanity check the new list.
2180 2215 */
2181 2216 if (contig_pfn_cnt < 2) { /* no contig pfns */
2182 2217 contig_pfn_cnt = 0;
2183 2218 contig_pfnlist_buildfailed++;
2184 2219 kmem_free(contig_pfn_list, contig_pfn_max * sizeof (pfn_t));
2185 2220 contig_pfn_list = NULL;
2186 2221 contig_pfn_max = 0;
2187 2222 ret = 0;
2188 2223 goto out;
2189 2224 }
2190 2225 qsort(contig_pfn_list, contig_pfn_cnt, sizeof (pfn_t), mfn_compare);
2191 2226 compact_contig_pfn_list();
2192 2227 /*
2193 2228 * Make sure next search of the newly created contiguous pfn
2194 2229 * list starts at the beginning of the list.
2195 2230 */
2196 2231 next_alloc_pfn = 0;
2197 2232 contig_pfnlist_builds++; /* count list builds */
2198 2233 out:
2199 2234 mutex_exit(&contig_list_lock);
2200 2235 return (ret);
2201 2236 }
2202 2237
2203 2238
2204 2239 /*
2205 2240 * Toss the current contig pfnlist. Someone is about to do a massive
2206 2241 * update to pfn<->mfn mappings. So we have them destroy the list and lock
2207 2242 * it till they are done with their update.
2208 2243 */
2209 2244 void
2210 2245 clear_and_lock_contig_pfnlist()
2211 2246 {
2212 2247 pfn_t *listp = NULL;
2213 2248 size_t listsize;
2214 2249
2215 2250 mutex_enter(&contig_list_lock);
2216 2251 if (contig_pfn_list != NULL) {
2217 2252 listp = contig_pfn_list;
2218 2253 listsize = contig_pfn_max * sizeof (pfn_t);
2219 2254 contig_pfn_list = NULL;
2220 2255 contig_pfn_max = contig_pfn_cnt = 0;
2221 2256 }
2222 2257 if (listp != NULL)
2223 2258 kmem_free(listp, listsize);
2224 2259 }
2225 2260
2226 2261 /*
2227 2262 * Unlock the contig_pfn_list. The next attempted use of it will cause
2228 2263 * it to be re-created.
2229 2264 */
2230 2265 void
2231 2266 unlock_contig_pfnlist()
2232 2267 {
2233 2268 mutex_exit(&contig_list_lock);
2234 2269 }
2235 2270
2236 2271 /*
2237 2272 * Update the contiguous pfn list in response to a pfn <-> mfn reassignment
2238 2273 */
2239 2274 void
2240 2275 update_contig_pfnlist(pfn_t pfn, mfn_t oldmfn, mfn_t newmfn)
2241 2276 {
2242 2277 int probe_hi, probe_lo, probe_pos, insert_after, insert_point;
2243 2278 pfn_t probe_pfn;
2244 2279 mfn_t probe_mfn;
2245 2280 int drop_lock = 0;
2246 2281
2247 2282 if (mutex_owner(&contig_list_lock) != curthread) {
2248 2283 drop_lock = 1;
2249 2284 mutex_enter(&contig_list_lock);
2250 2285 }
2251 2286 if (contig_pfn_list == NULL)
2252 2287 goto done;
2253 2288 contig_pfnlist_updates++;
2254 2289 /*
2255 2290 * Find the pfn in the current list. Use a binary chop to locate it.
2256 2291 */
2257 2292 probe_hi = contig_pfn_cnt - 1;
2258 2293 probe_lo = 0;
2259 2294 probe_pos = (probe_hi + probe_lo) / 2;
2260 2295 while ((probe_pfn = contig_pfn_list[probe_pos]) != pfn) {
2261 2296 if (probe_pos == probe_lo) { /* pfn not in list */
2262 2297 probe_pos = -1;
2263 2298 break;
2264 2299 }
2265 2300 if (pfn_to_mfn(probe_pfn) <= oldmfn)
2266 2301 probe_lo = probe_pos;
2267 2302 else
2268 2303 probe_hi = probe_pos;
2269 2304 probe_pos = (probe_hi + probe_lo) / 2;
2270 2305 }
2271 2306 if (probe_pos >= 0) {
2272 2307 /*
2273 2308 * Remove pfn from list and ensure next alloc
2274 2309 * position stays in bounds.
2275 2310 */
2276 2311 if (--contig_pfn_cnt <= next_alloc_pfn)
2277 2312 next_alloc_pfn = 0;
2278 2313 if (contig_pfn_cnt < 2) { /* no contig pfns */
2279 2314 contig_pfn_cnt = 0;
2280 2315 kmem_free(contig_pfn_list,
2281 2316 contig_pfn_max * sizeof (pfn_t));
2282 2317 contig_pfn_list = NULL;
2283 2318 contig_pfn_max = 0;
2284 2319 goto done;
2285 2320 }
2286 2321 ovbcopy(&contig_pfn_list[probe_pos + 1],
2287 2322 &contig_pfn_list[probe_pos],
2288 2323 (contig_pfn_cnt - probe_pos) * sizeof (pfn_t));
2289 2324 }
2290 2325 if (newmfn == MFN_INVALID)
2291 2326 goto done;
2292 2327 /*
2293 2328 * Check if new mfn has adjacent mfns in the list
2294 2329 */
2295 2330 probe_hi = contig_pfn_cnt - 1;
2296 2331 probe_lo = 0;
2297 2332 insert_after = -2;
2298 2333 do {
2299 2334 probe_pos = (probe_hi + probe_lo) / 2;
2300 2335 probe_mfn = pfn_to_mfn(contig_pfn_list[probe_pos]);
2301 2336 if (newmfn == probe_mfn + 1)
2302 2337 insert_after = probe_pos;
2303 2338 else if (newmfn == probe_mfn - 1)
2304 2339 insert_after = probe_pos - 1;
2305 2340 if (probe_pos == probe_lo)
2306 2341 break;
2307 2342 if (probe_mfn <= newmfn)
2308 2343 probe_lo = probe_pos;
2309 2344 else
2310 2345 probe_hi = probe_pos;
2311 2346 } while (insert_after == -2);
2312 2347 /*
2313 2348 	 * If there is space in the list and there are adjacent mfns,
2314 2349 	 * insert the pfn into its proper place in the list.
2315 2350 */
2316 2351 if (insert_after != -2 && contig_pfn_cnt + 1 <= contig_pfn_max) {
2317 2352 insert_point = insert_after + 1;
2318 2353 ovbcopy(&contig_pfn_list[insert_point],
2319 2354 &contig_pfn_list[insert_point + 1],
2320 2355 (contig_pfn_cnt - insert_point) * sizeof (pfn_t));
2321 2356 contig_pfn_list[insert_point] = pfn;
2322 2357 contig_pfn_cnt++;
2323 2358 }
2324 2359 done:
2325 2360 if (drop_lock)
2326 2361 mutex_exit(&contig_list_lock);
2327 2362 }
2328 2363
2329 2364 /*
2330 2365 * Called to (re-)populate the io_pool from the free page lists.
2331 2366 */
2332 2367 long
2333 2368 populate_io_pool(void)
2334 2369 {
2335 2370 pfn_t pfn;
2336 2371 mfn_t mfn, max_mfn;
2337 2372 page_t *pp;
2338 2373
2339 2374 /*
2340 2375 * Figure out the bounds of the pool on first invocation.
2341 2376 * We use a percentage of memory for the io pool size.
2342 2377 	 * We allow that to shrink, but not below a fixed minimum.
2343 2378 */
2344 2379 if (io_pool_cnt_max == 0) {
2345 2380 io_pool_cnt_max = physmem / (100 / io_pool_physmem_pct);
2346 2381 io_pool_cnt_lowater = io_pool_cnt_max;
2347 2382 /*
2348 2383 		 * This is the first time in populate_io_pool; grab a va to use
2349 2384 * when we need to allocate pages.
2350 2385 */
2351 2386 io_pool_kva = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
2352 2387 }
2353 2388 /*
2354 2389 * If we are out of pages in the pool, then grow the size of the pool
2355 2390 */
2356 2391 if (io_pool_cnt == 0) {
2357 2392 /*
2358 2393 * Grow the max size of the io pool by 5%, but never more than
2359 2394 * 25% of physical memory.
2360 2395 */
2361 2396 if (io_pool_cnt_max < physmem / 4)
2362 2397 io_pool_cnt_max += io_pool_cnt_max / 20;
2363 2398 }
2364 2399 io_pool_grows++; /* should be a kstat? */
2365 2400
2366 2401 /*
2367 2402 * Get highest mfn on this platform, but limit to the 32 bit DMA max.
2368 2403 */
2369 2404 (void) mfn_to_pfn(start_mfn);
2370 2405 max_mfn = MIN(cached_max_mfn, PFN_4GIG);
2371 2406 for (mfn = start_mfn; mfn < max_mfn; start_mfn = ++mfn) {
2372 2407 pfn = mfn_to_pfn(mfn);
2373 2408 if (pfn & PFN_IS_FOREIGN_MFN)
2374 2409 continue;
2375 2410 /*
2376 2411 * try to allocate it from free pages
2377 2412 */
2378 2413 pp = page_numtopp_alloc(pfn);
2379 2414 if (pp == NULL)
2380 2415 continue;
2381 2416 PP_CLRFREE(pp);
2382 2417 add_page_to_pool(pp, 1);
2383 2418 if (io_pool_cnt >= io_pool_cnt_max)
2384 2419 break;
2385 2420 }
2386 2421
2387 2422 return (io_pool_cnt);
2388 2423 }
2389 2424
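For a sense of the pool sizing above, assume io_pool_physmem_pct is 2 and physmem is 1048576 pages (4 GB of 4 KB pages): the initial cap is physmem / (100 / 2) = 20971 pages (about 2%), and each time the pool is found empty the cap grows by 1/20 of itself until it reaches physmem / 4. A minimal sketch of that arithmetic under those assumed values:

	#include <stdio.h>

	int
	main(void)
	{
		long physmem = 1048576;			/* assumed: 4 GB of 4 KB pages */
		int io_pool_physmem_pct = 2;		/* assumed tunable value */
		long cnt_max = physmem / (100 / io_pool_physmem_pct);
		int grows;

		printf("initial cap = %ld pages\n", cnt_max);	/* 20971 */

		/* grow by 5% per empty-pool event, never past 25% of physmem */
		for (grows = 0; grows < 5 && cnt_max < physmem / 4; grows++)
			cnt_max += cnt_max / 20;
		printf("cap after %d growth steps = %ld pages\n", grows, cnt_max);
		return (0);
	}
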
2390 2425 /*
2391 2426 * Destroy a page that was being used for DMA I/O. It may or
2392 2427 * may not actually go back to the io_pool.
2393 2428 */
2394 2429 void
2395 2430 page_destroy_io(page_t *pp)
2396 2431 {
2397 2432 mfn_t mfn = mfn_list[pp->p_pagenum];
2398 2433
2399 2434 /*
2400 2435 	 * When the page was alloc'd, a reservation was made; release it now.
2401 2436 */
2402 2437 page_unresv(1);
2403 2438 /*
2404 2439 * Unload translations, if any, then hash out the
2405 2440 * page to erase its identity.
2406 2441 */
2407 2442 (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
2408 2443 page_hashout(pp, NULL);
2409 2444
2410 2445 /*
2411 2446 * If the page came from the free lists, just put it back to them.
2412 2447 * DomU pages always go on the free lists as well.
2413 2448 */
2414 2449 if (!DOMAIN_IS_INITDOMAIN(xen_info) || mfn >= PFN_4GIG) {
2415 2450 page_free(pp, 1);
2416 2451 return;
2417 2452 }
2418 2453
2419 2454 add_page_to_pool(pp, 0);
2420 2455 }
2421 2456
2422 2457
2423 2458 long contig_searches; /* count of times contig pages requested */
2424 2459 long contig_search_restarts; /* count of contig ranges tried */
2425 2460 long contig_search_failed; /* count of contig alloc failures */
2426 2461
2427 2462 /*
2428 2463 * Free partial page list
2429 2464 */
2430 2465 static void
2431 2466 free_partial_list(page_t **pplist)
2432 2467 {
2433 2468 page_t *pp;
2434 2469
2435 2470 while (*pplist != NULL) {
2436 2471 pp = *pplist;
2437 2472 page_io_pool_sub(pplist, pp, pp);
2438 2473 page_free(pp, 1);
2439 2474 }
2440 2475 }
2441 2476
2442 2477 /*
2443 2478 * Look thru the contiguous pfns that are not part of the io_pool for
2444 2479 * contiguous free pages. Return a list of the found pages or NULL.
2445 2480 */
2446 2481 page_t *
2447 2482 find_contig_free(uint_t npages, uint_t flags, uint64_t pfnseg,
2448 2483 pgcnt_t pfnalign)
2449 2484 {
2450 2485 page_t *pp, *plist = NULL;
2451 2486 mfn_t mfn, prev_mfn, start_mfn;
2452 2487 pfn_t pfn;
2453 2488 int pages_needed, pages_requested;
2454 2489 int search_start;
2455 2490
2456 2491 /*
2457 2492 * create the contig pfn list if not already done
2458 2493 */
2459 2494 retry:
2460 2495 mutex_enter(&contig_list_lock);
2461 2496 if (contig_pfn_list == NULL) {
2462 2497 mutex_exit(&contig_list_lock);
2463 2498 if (!create_contig_pfnlist(flags)) {
2464 2499 return (NULL);
2465 2500 }
2466 2501 goto retry;
2467 2502 }
2468 2503 contig_searches++;
2469 2504 /*
2470 2505 * Search contiguous pfn list for physically contiguous pages not in
2471 2506 * the io_pool. Start the search where the last search left off.
2472 2507 */
2473 2508 pages_requested = pages_needed = npages;
2474 2509 search_start = next_alloc_pfn;
2475 2510 start_mfn = prev_mfn = 0;
2476 2511 while (pages_needed) {
2477 2512 pfn = contig_pfn_list[next_alloc_pfn];
2478 2513 mfn = pfn_to_mfn(pfn);
2479 2514 /*
2480 2515 		 * Check if mfn is the first one or contiguous to the previous
2481 2516 		 * one, if the page corresponding to mfn is free, and that the
2482 2517 		 * mfn range does not cross a segment boundary.
2483 2518 */
2484 2519 if ((prev_mfn == 0 || mfn == prev_mfn + 1) &&
2485 2520 (pp = page_numtopp_alloc(pfn)) != NULL &&
2486 2521 !((mfn & pfnseg) < (start_mfn & pfnseg))) {
2487 2522 PP_CLRFREE(pp);
2488 2523 page_io_pool_add(&plist, pp);
2489 2524 pages_needed--;
2490 2525 if (prev_mfn == 0) {
2491 2526 if (pfnalign &&
2492 2527 mfn != P2ROUNDUP(mfn, pfnalign)) {
2493 2528 /*
2494 2529 * not properly aligned
2495 2530 */
2496 2531 contig_search_restarts++;
2497 2532 free_partial_list(&plist);
2498 2533 pages_needed = pages_requested;
2499 2534 start_mfn = prev_mfn = 0;
2500 2535 goto skip;
2501 2536 }
2502 2537 start_mfn = mfn;
2503 2538 }
2504 2539 prev_mfn = mfn;
2505 2540 } else {
2506 2541 contig_search_restarts++;
2507 2542 free_partial_list(&plist);
2508 2543 pages_needed = pages_requested;
2509 2544 start_mfn = prev_mfn = 0;
2510 2545 }
2511 2546 skip:
2512 2547 if (++next_alloc_pfn == contig_pfn_cnt)
2513 2548 next_alloc_pfn = 0;
2514 2549 if (next_alloc_pfn == search_start)
2515 2550 break; /* all pfns searched */
2516 2551 }
2517 2552 mutex_exit(&contig_list_lock);
2518 2553 if (pages_needed) {
2519 2554 contig_search_failed++;
2520 2555 /*
2521 2556 * Failed to find enough contig pages.
2522 2557 * free partial page list
2523 2558 */
2524 2559 free_partial_list(&plist);
2525 2560 }
2526 2561 return (plist);
2527 2562 }
2528 2563
2529 2564 /*
2530 2565 * Search the reserved io pool pages for a page range with the
2531 2566 * desired characteristics.
2532 2567 */
2533 2568 page_t *
2534 2569 page_io_pool_alloc(ddi_dma_attr_t *mattr, int contig, pgcnt_t minctg)
2535 2570 {
2536 2571 page_t *pp_first, *pp_last;
2537 2572 page_t *pp, **poolp;
2538 2573 pgcnt_t nwanted, pfnalign;
2539 2574 uint64_t pfnseg;
2540 2575 mfn_t mfn, tmfn, hi_mfn, lo_mfn;
2541 2576 int align, attempt = 0;
2542 2577
2543 2578 if (minctg == 1)
2544 2579 contig = 0;
2545 2580 lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2546 2581 hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2547 2582 pfnseg = mmu_btop(mattr->dma_attr_seg);
2548 2583 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2549 2584 if (align > MMU_PAGESIZE)
2550 2585 pfnalign = mmu_btop(align);
2551 2586 else
2552 2587 pfnalign = 0;
2553 2588
2554 2589 try_again:
2555 2590 /*
2556 2591 * See if we want pages for a legacy device
2557 2592 */
2558 2593 if (hi_mfn < PFN_16MEG)
2559 2594 poolp = &io_pool_16m;
2560 2595 else
2561 2596 poolp = &io_pool_4g;
2562 2597 try_smaller:
2563 2598 /*
2564 2599 * Take pages from I/O pool. We'll use pages from the highest
2565 2600 * MFN range possible.
2566 2601 */
2567 2602 pp_first = pp_last = NULL;
2568 2603 mutex_enter(&io_pool_lock);
2569 2604 nwanted = minctg;
2570 2605 for (pp = *poolp; pp && nwanted > 0; ) {
2571 2606 pp = pp->p_prev;
2572 2607
2573 2608 /*
2574 2609 * skip pages above allowable range
2575 2610 */
2576 2611 mfn = mfn_list[pp->p_pagenum];
2577 2612 if (hi_mfn < mfn)
2578 2613 goto skip;
2579 2614
2580 2615 /*
2581 2616 * stop at pages below allowable range
2582 2617 */
2583 2618 if (lo_mfn > mfn)
2584 2619 break;
2585 2620 restart:
2586 2621 if (pp_last == NULL) {
2587 2622 /*
2588 2623 * Check alignment
2589 2624 */
2590 2625 tmfn = mfn - (minctg - 1);
2591 2626 if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
2592 2627 goto skip; /* not properly aligned */
2593 2628 /*
2594 2629 * Check segment
2595 2630 */
2596 2631 if ((mfn & pfnseg) < (tmfn & pfnseg))
2597 2632 goto skip; /* crosses seg boundary */
2598 2633 /*
2599 2634 * Start building page list
2600 2635 */
2601 2636 pp_first = pp_last = pp;
2602 2637 nwanted--;
2603 2638 } else {
2604 2639 /*
2605 2640 * check physical contiguity if required
2606 2641 */
2607 2642 if (contig &&
2608 2643 mfn_list[pp_first->p_pagenum] != mfn + 1) {
2609 2644 /*
2610 2645 * not a contiguous page, restart list.
2611 2646 */
2612 2647 pp_last = NULL;
2613 2648 nwanted = minctg;
2614 2649 goto restart;
2615 2650 } else { /* add page to list */
2616 2651 pp_first = pp;
2617 2652 nwanted--;
2618 2653 }
2619 2654 }
2620 2655 skip:
2621 2656 if (pp == *poolp)
2622 2657 break;
2623 2658 }
2624 2659
2625 2660 /*
2626 2661 	 * If we didn't find memory, try the more constrained pool, then
2627 2662 * sweep free pages into the DMA pool and try again.
2628 2663 */
2629 2664 if (nwanted != 0) {
2630 2665 mutex_exit(&io_pool_lock);
2631 2666 /*
2632 2667 * If we were looking in the less constrained pool and
2633 2668 * didn't find pages, try the more constrained pool.
2634 2669 */
2635 2670 if (poolp == &io_pool_4g) {
2636 2671 poolp = &io_pool_16m;
2637 2672 goto try_smaller;
2638 2673 }
2639 2674 kmem_reap();
2640 2675 if (++attempt < 4) {
2641 2676 /*
2642 2677 * Grab some more io_pool pages
2643 2678 */
2644 2679 (void) populate_io_pool();
2645 2680 goto try_again; /* go around and retry */
2646 2681 }
2647 2682 return (NULL);
2648 2683 }
2649 2684 /*
2650 2685 * Found the pages, now snip them from the list
2651 2686 */
2652 2687 page_io_pool_sub(poolp, pp_first, pp_last);
2653 2688 io_pool_cnt -= minctg;
2654 2689 /*
2655 2690 * reset low water mark
2656 2691 */
2657 2692 if (io_pool_cnt < io_pool_cnt_lowater)
2658 2693 io_pool_cnt_lowater = io_pool_cnt;
2659 2694 mutex_exit(&io_pool_lock);
2660 2695 return (pp_first);
2661 2696 }
2662 2697
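To put concrete numbers on the alignment and segment tests above (all of the values here are assumptions for illustration): with dma_attr_align of 64 KB and 4 KB pages, pfnalign is 16, so a candidate run of minctg = 4 whose highest mfn is 0x1013 starts at tmfn = 0x1010, which is 16-aligned and does not cross the 4 GB segment, and is therefore accepted; a run ending at 0x1010 would start at the unaligned 0x100d and be skipped. A user-level sketch of the two checks, with P2ROUNDUP() defined locally as a stand-in for the kernel macro:

	#include <stdio.h>

	typedef unsigned long mfn_t;

	/* local stand-in for P2ROUNDUP(); align must be a power of two */
	#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

	int
	main(void)
	{
		mfn_t mfn = 0x1013;		/* highest mfn of the candidate run */
		unsigned long minctg = 4;	/* pages wanted in this run */
		unsigned long pfnalign = 16;	/* 64 KB alignment / 4 KB pages */
		unsigned long pfnseg = 0xfffff;	/* 4 GB dma_attr_seg, in pages */
		mfn_t tmfn = mfn - (minctg - 1);

		if (pfnalign && tmfn != P2ROUNDUP(tmfn, pfnalign))
			printf("skip: run start 0x%lx is not aligned\n", tmfn);
		else if ((mfn & pfnseg) < (tmfn & pfnseg))
			printf("skip: run crosses a segment boundary\n");
		else
			printf("candidate run 0x%lx..0x%lx accepted\n", tmfn, mfn);
		return (0);
	}
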
2663 2698 page_t *
2664 2699 page_swap_with_hypervisor(struct vnode *vp, u_offset_t off, caddr_t vaddr,
2665 2700 ddi_dma_attr_t *mattr, uint_t flags, pgcnt_t minctg)
2666 2701 {
2667 2702 uint_t kflags;
2668 2703 int order, extra, extpages, i, contig, nbits, extents;
2669 2704 page_t *pp, *expp, *pp_first, **pplist = NULL;
2670 2705 mfn_t *mfnlist = NULL;
2671 2706
2672 2707 contig = flags & PG_PHYSCONTIG;
2673 2708 if (minctg == 1)
2674 2709 contig = 0;
2675 2710 flags &= ~PG_PHYSCONTIG;
2676 2711 kflags = flags & PG_WAIT ? KM_SLEEP : KM_NOSLEEP;
2677 2712 /*
2678 2713 	 * The hypervisor will allocate extents; if we want contig
2679 2714 	 * pages, the extent must be >= minctg.
2680 2715 */
2681 2716 if (contig) {
2682 2717 order = highbit(minctg) - 1;
2683 2718 if (minctg & ((1 << order) - 1))
2684 2719 order++;
2685 2720 extpages = 1 << order;
2686 2721 } else {
2687 2722 order = 0;
2688 2723 extpages = minctg;
2689 2724 }
2690 2725 if (extpages > minctg) {
2691 2726 extra = extpages - minctg;
2692 2727 if (!page_resv(extra, kflags))
2693 2728 return (NULL);
2694 2729 }
2695 2730 pp_first = NULL;
2696 2731 pplist = kmem_alloc(extpages * sizeof (page_t *), kflags);
2697 2732 if (pplist == NULL)
2698 2733 goto balloon_fail;
2699 2734 mfnlist = kmem_alloc(extpages * sizeof (mfn_t), kflags);
2700 2735 if (mfnlist == NULL)
2701 2736 goto balloon_fail;
2702 2737 pp = page_create_va(vp, off, minctg * PAGESIZE, flags, &kvseg, vaddr);
2703 2738 if (pp == NULL)
2704 2739 goto balloon_fail;
2705 2740 pp_first = pp;
2706 2741 if (extpages > minctg) {
2707 2742 /*
2708 2743 * fill out the rest of extent pages to swap
2709 2744 * with the hypervisor
2710 2745 */
2711 2746 for (i = 0; i < extra; i++) {
2712 2747 expp = page_create_va(vp,
2713 2748 (u_offset_t)(uintptr_t)io_pool_kva,
2714 2749 PAGESIZE, flags, &kvseg, io_pool_kva);
2715 2750 if (expp == NULL)
2716 2751 goto balloon_fail;
2717 2752 (void) hat_pageunload(expp, HAT_FORCE_PGUNLOAD);
2718 2753 page_io_unlock(expp);
2719 2754 page_hashout(expp, NULL);
2720 2755 page_io_lock(expp);
2721 2756 /*
2722 2757 * add page to end of list
2723 2758 */
2724 2759 expp->p_prev = pp_first->p_prev;
2725 2760 expp->p_next = pp_first;
2726 2761 expp->p_prev->p_next = expp;
2727 2762 pp_first->p_prev = expp;
2728 2763 }
2729 2764
2730 2765 }
2731 2766 for (i = 0; i < extpages; i++) {
2732 2767 pplist[i] = pp;
2733 2768 pp = pp->p_next;
2734 2769 }
2735 2770 nbits = highbit(mattr->dma_attr_addr_hi);
2736 2771 extents = contig ? 1 : minctg;
2737 2772 if (balloon_replace_pages(extents, pplist, nbits, order,
2738 2773 mfnlist) != extents) {
2739 2774 if (ioalloc_dbg)
2740 2775 cmn_err(CE_NOTE, "request to hypervisor"
2741 2776 " for %d pages, maxaddr %" PRIx64 " failed",
2742 2777 extpages, mattr->dma_attr_addr_hi);
2743 2778 goto balloon_fail;
2744 2779 }
2745 2780
2746 2781 kmem_free(pplist, extpages * sizeof (page_t *));
2747 2782 kmem_free(mfnlist, extpages * sizeof (mfn_t));
2748 2783 /*
2749 2784 * Return any excess pages to free list
2750 2785 */
2751 2786 if (extpages > minctg) {
2752 2787 for (i = 0; i < extra; i++) {
2753 2788 pp = pp_first->p_prev;
2754 2789 page_sub(&pp_first, pp);
2755 2790 page_io_unlock(pp);
2756 2791 page_unresv(1);
2757 2792 page_free(pp, 1);
2758 2793 }
2759 2794 }
2760 2795 return (pp_first);
2761 2796 balloon_fail:
2762 2797 /*
2763 2798 * Return pages to free list and return failure
2764 2799 */
2765 2800 while (pp_first != NULL) {
2766 2801 pp = pp_first;
2767 2802 page_sub(&pp_first, pp);
2768 2803 page_io_unlock(pp);
2769 2804 if (pp->p_vnode != NULL)
2770 2805 page_hashout(pp, NULL);
2771 2806 page_free(pp, 1);
2772 2807 }
2773 2808 if (pplist)
2774 2809 kmem_free(pplist, extpages * sizeof (page_t *));
2775 2810 if (mfnlist)
2776 2811 kmem_free(mfnlist, extpages * sizeof (mfn_t));
2777 2812 page_unresv(extpages - minctg);
2778 2813 return (NULL);
2779 2814 }
2780 2815
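The extent sizing at the top of page_swap_with_hypervisor() rounds a contiguous request up to a power-of-two number of pages. For an assumed minctg of 5: highbit(5) is 3, so order starts at 2, the low-bits test bumps it to 3, and extpages becomes 8, leaving extra = 3 pages to return to the free list afterwards. A sketch of just that rounding, with highbit() defined locally as a stand-in for the kernel routine:

	#include <stdio.h>

	/* local stand-in: 1-based index of the highest set bit (highbit(1) == 1) */
	static int
	highbit(unsigned long v)
	{
		int b = 0;

		while (v != 0) {
			b++;
			v >>= 1;
		}
		return (b);
	}

	int
	main(void)
	{
		unsigned long minctg = 5;	/* assumed contiguous page count */
		int order = highbit(minctg) - 1;
		unsigned long extpages, extra;

		if (minctg & ((1UL << order) - 1))
			order++;		/* not a power of two: round up */
		extpages = 1UL << order;
		extra = extpages - minctg;

		/* prints: order=3 extpages=8 extra=3 */
		printf("order=%d extpages=%lu extra=%lu\n", order, extpages, extra);
		return (0);
	}
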
2781 2816 static void
2782 2817 return_partial_alloc(page_t *plist)
2783 2818 {
2784 2819 page_t *pp;
2785 2820
2786 2821 while (plist != NULL) {
2787 2822 pp = plist;
2788 2823 page_sub(&plist, pp);
2789 2824 page_io_unlock(pp);
2790 2825 page_destroy_io(pp);
2791 2826 }
2792 2827 }
2793 2828
2794 2829 static page_t *
2795 2830 page_get_contigpages(
2796 2831 struct vnode *vp,
2797 2832 u_offset_t off,
2798 2833 int *npagesp,
2799 2834 uint_t flags,
2800 2835 caddr_t vaddr,
2801 2836 ddi_dma_attr_t *mattr)
2802 2837 {
2803 2838 mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2804 2839 page_t *plist; /* list to return */
2805 2840 page_t *pp, *mcpl;
2806 2841 int contig, anyaddr, npages, getone = 0;
2807 2842 mfn_t lo_mfn;
2808 2843 mfn_t hi_mfn;
2809 2844 pgcnt_t pfnalign = 0;
2810 2845 int align, sgllen;
2811 2846 uint64_t pfnseg;
2812 2847 pgcnt_t minctg;
2813 2848
2814 2849 npages = *npagesp;
2815 2850 ASSERT(mattr != NULL);
2816 2851 lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2817 2852 hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2818 2853 sgllen = mattr->dma_attr_sgllen;
2819 2854 pfnseg = mmu_btop(mattr->dma_attr_seg);
2820 2855 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2821 2856 if (align > MMU_PAGESIZE)
2822 2857 pfnalign = mmu_btop(align);
2823 2858
2824 2859 contig = flags & PG_PHYSCONTIG;
2825 2860 if (npages == -1) {
2826 2861 npages = 1;
2827 2862 pfnalign = 0;
2828 2863 }
2829 2864 /*
2830 2865 * Clear the contig flag if only one page is needed.
2831 2866 */
2832 2867 if (npages == 1) {
2833 2868 getone = 1;
2834 2869 contig = 0;
2835 2870 }
2836 2871
2837 2872 /*
2838 2873 * Check if any page in the system is fine.
2839 2874 */
2840 2875 anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn;
2841 2876 if (!contig && anyaddr && !pfnalign) {
2842 2877 flags &= ~PG_PHYSCONTIG;
2843 2878 plist = page_create_va(vp, off, npages * MMU_PAGESIZE,
2844 2879 flags, &kvseg, vaddr);
2845 2880 if (plist != NULL) {
2846 2881 *npagesp = 0;
2847 2882 return (plist);
2848 2883 }
2849 2884 }
2850 2885 plist = NULL;
2851 2886 minctg = howmany(npages, sgllen);
2852 2887 while (npages > sgllen || getone) {
2853 2888 if (minctg > npages)
2854 2889 minctg = npages;
2855 2890 mcpl = NULL;
2856 2891 /*
2857 2892 * We could want contig pages with no address range limits.
2858 2893 */
2859 2894 if (anyaddr && contig) {
2860 2895 /*
2861 2896 * Look for free contig pages to satisfy the request.
2862 2897 */
2863 2898 mcpl = find_contig_free(minctg, flags, pfnseg,
2864 2899 pfnalign);
2865 2900 }
2866 2901 /*
2867 2902 * Try the reserved io pools next
2868 2903 */
2869 2904 if (mcpl == NULL)
2870 2905 mcpl = page_io_pool_alloc(mattr, contig, minctg);
2871 2906 if (mcpl != NULL) {
2872 2907 pp = mcpl;
2873 2908 do {
2874 2909 if (!page_hashin(pp, vp, off, NULL)) {
2875 2910 panic("page_get_contigpages:"
2876 2911 " hashin failed"
2877 2912 " pp %p, vp %p, off %llx",
2878 2913 (void *)pp, (void *)vp, off);
2879 2914 }
2880 2915 off += MMU_PAGESIZE;
2881 2916 PP_CLRFREE(pp);
2882 2917 PP_CLRAGED(pp);
2883 2918 page_set_props(pp, P_REF);
2884 2919 page_io_lock(pp);
2885 2920 pp = pp->p_next;
2886 2921 } while (pp != mcpl);
2887 2922 } else {
2888 2923 /*
2889 2924 * Hypervisor exchange doesn't handle segment or
2890 2925 * alignment constraints
2891 2926 */
2892 2927 if (mattr->dma_attr_seg < mattr->dma_attr_addr_hi ||
2893 2928 pfnalign)
2894 2929 goto fail;
2895 2930 /*
2896 2931 * Try exchanging pages with the hypervisor
2897 2932 */
2898 2933 mcpl = page_swap_with_hypervisor(vp, off, vaddr, mattr,
2899 2934 flags, minctg);
2900 2935 if (mcpl == NULL)
2901 2936 goto fail;
2902 2937 off += minctg * MMU_PAGESIZE;
2903 2938 }
2904 2939 check_dma(mattr, mcpl, minctg);
2905 2940 /*
2906 2941 * Here with a minctg run of contiguous pages, add them to the
2907 2942 * list we will return for this request.
2908 2943 */
2909 2944 page_list_concat(&plist, &mcpl);
2910 2945 npages -= minctg;
2911 2946 *npagesp = npages;
2912 2947 sgllen--;
2913 2948 if (getone)
2914 2949 break;
2915 2950 }
2916 2951 return (plist);
2917 2952 fail:
2918 2953 return_partial_alloc(plist);
2919 2954 return (NULL);
2920 2955 }
2921 2956
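The splitting loop in page_get_contigpages() above carves a request into at most sgllen runs of minctg pages and leaves any remainder for the caller to collect one page at a time. The bookkeeping for an assumed request of npages = 10 with sgllen = 4 is sketched below (howmany() is a local stand-in for the usual sys/sysmacros.h definition):

	#include <stdio.h>

	#define	howmany(x, y)	(((x) + ((y) - 1)) / (y))	/* local stand-in */

	int
	main(void)
	{
		int npages = 10, sgllen = 4;		/* assumed request shape */
		int minctg = howmany(npages, sgllen);	/* 3 */
		int runs = 0;

		while (npages > sgllen) {
			if (minctg > npages)
				minctg = npages;
			/* ... a run of minctg contiguous pages is built here ... */
			npages -= minctg;
			sgllen--;
			runs++;
		}
		/* prints: 3 runs of up to 3 pages, leftover npages=1 */
		printf("%d runs of up to %d pages, leftover npages=%d\n",
		    runs, howmany(10, 4), npages);
		return (0);
	}
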
2922 2957 /*
2923 2958 * Allocator for domain 0 I/O pages. We match the required
2924 2959 * DMA attributes and contiguity constraints.
2925 2960 */
2926 2961 /*ARGSUSED*/
2927 2962 page_t *
2928 2963 page_create_io(
2929 2964 struct vnode *vp,
2930 2965 u_offset_t off,
2931 2966 uint_t bytes,
2932 2967 uint_t flags,
2933 2968 struct as *as,
2934 2969 caddr_t vaddr,
2935 2970 ddi_dma_attr_t *mattr)
2936 2971 {
2937 2972 page_t *plist = NULL, *pp;
2938 2973 int npages = 0, contig, anyaddr, pages_req;
2939 2974 mfn_t lo_mfn;
2940 2975 mfn_t hi_mfn;
2941 2976 pgcnt_t pfnalign = 0;
2942 2977 int align;
2943 2978 int is_domu = 0;
2944 2979 int dummy, bytes_got;
2945 2980 mfn_t max_mfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
2946 2981
2947 2982 ASSERT(mattr != NULL);
2948 2983 lo_mfn = mmu_btop(mattr->dma_attr_addr_lo);
2949 2984 hi_mfn = mmu_btop(mattr->dma_attr_addr_hi);
2950 2985 align = maxbit(mattr->dma_attr_align, mattr->dma_attr_minxfer);
2951 2986 if (align > MMU_PAGESIZE)
2952 2987 pfnalign = mmu_btop(align);
2953 2988
2954 2989 /*
2955 2990 * Clear the contig flag if only one page is needed or the scatter
2956 2991 * gather list length is >= npages.
2957 2992 */
2958 2993 pages_req = npages = mmu_btopr(bytes);
2959 2994 contig = (flags & PG_PHYSCONTIG);
2960 2995 bytes = P2ROUNDUP(bytes, MMU_PAGESIZE);
2961 2996 if (bytes == MMU_PAGESIZE || mattr->dma_attr_sgllen >= npages)
2962 2997 contig = 0;
2963 2998
2964 2999 /*
2965 3000 * Check if any old page in the system is fine.
2966 3001 * DomU should always go down this path.
2967 3002 */
2968 3003 is_domu = !DOMAIN_IS_INITDOMAIN(xen_info);
2969 3004 anyaddr = lo_mfn == 0 && hi_mfn >= max_mfn && !pfnalign;
2970 3005 if ((!contig && anyaddr) || is_domu) {
2971 3006 flags &= ~PG_PHYSCONTIG;
2972 3007 plist = page_create_va(vp, off, bytes, flags, &kvseg, vaddr);
2973 3008 if (plist != NULL)
2974 3009 return (plist);
2975 3010 else if (is_domu)
2976 3011 return (NULL); /* no memory available */
2977 3012 }
2978 3013 /*
2979 3014 * DomU should never reach here
2980 3015 */
2981 3016 if (contig) {
2982 3017 plist = page_get_contigpages(vp, off, &npages, flags, vaddr,
2983 3018 mattr);
2984 3019 if (plist == NULL)
2985 3020 goto fail;
2986 3021 bytes_got = (pages_req - npages) << MMU_PAGESHIFT;
2987 3022 vaddr += bytes_got;
2988 3023 off += bytes_got;
2989 3024 /*
2990 3025 * We now have all the contiguous pages we need, but
2991 3026 * we may still need additional non-contiguous pages.
2992 3027 */
2993 3028 }
2994 3029 /*
2995 3030 	 * now loop collecting the requested number of pages; these do
2996 3031 	 * not have to be contiguous pages, but we will use the contig
2997 3032 * page alloc code to get the pages since it will honor any
2998 3033 * other constraints the pages may have.
2999 3034 */
3000 3035 while (npages--) {
3001 3036 dummy = -1;
3002 3037 pp = page_get_contigpages(vp, off, &dummy, flags, vaddr, mattr);
3003 3038 if (pp == NULL)
3004 3039 goto fail;
3005 3040 page_add(&plist, pp);
3006 3041 vaddr += MMU_PAGESIZE;
3007 3042 off += MMU_PAGESIZE;
3008 3043 }
3009 3044 return (plist);
3010 3045 fail:
3011 3046 /*
3012 3047 * Failed to get enough pages, return ones we did get
3013 3048 */
3014 3049 return_partial_alloc(plist);
3015 3050 return (NULL);
3016 3051 }
3017 3052
3018 3053 /*
3019 3054 * Lock and return the page with the highest mfn that we can find. last_mfn
3020 3055 * holds the last one found, so the next search can start from there. We
3021 3056 * also keep a counter so that we don't loop forever if the machine has no
3022 3057 * free pages.
3023 3058 *
3024 3059 * This is called from the balloon thread to find pages to give away. new_high
3025 3060  * is used when new mfns have been added to the system - we will reset our
3026 3061  * search if the new mfns are higher than our current search position.
3027 3062 */
3028 3063 page_t *
3029 3064 page_get_high_mfn(mfn_t new_high)
3030 3065 {
3031 3066 static mfn_t last_mfn = 0;
3032 3067 pfn_t pfn;
3033 3068 page_t *pp;
3034 3069 ulong_t loop_count = 0;
3035 3070
3036 3071 if (new_high > last_mfn)
3037 3072 last_mfn = new_high;
3038 3073
3039 3074 for (; loop_count < mfn_count; loop_count++, last_mfn--) {
3040 3075 if (last_mfn == 0) {
3041 3076 last_mfn = cached_max_mfn;
3042 3077 }
3043 3078
3044 3079 pfn = mfn_to_pfn(last_mfn);
3045 3080 if (pfn & PFN_IS_FOREIGN_MFN)
3046 3081 continue;
3047 3082
3048 3083 /* See if the page is free. If so, lock it. */
3049 3084 pp = page_numtopp_alloc(pfn);
3050 3085 if (pp == NULL)
3051 3086 continue;
3052 3087 PP_CLRFREE(pp);
3053 3088
3054 3089 ASSERT(PAGE_EXCL(pp));
3055 3090 ASSERT(pp->p_vnode == NULL);
3056 3091 ASSERT(!hat_page_is_mapped(pp));
3057 3092 last_mfn--;
3058 3093 return (pp);
3059 3094 }
3060 3095 return (NULL);
3061 3096 }
3062 3097
3063 3098 #else /* !__xpv */
3064 3099
3065 3100 /*
3066 3101 * get a page from any list with the given mnode
3067 3102 */
3068 3103 static page_t *
3069 3104 page_get_mnode_anylist(ulong_t origbin, uchar_t szc, uint_t flags,
3070 3105 int mnode, int mtype, ddi_dma_attr_t *dma_attr)
3071 3106 {
3072 3107 kmutex_t *pcm;
3073 3108 int i;
3074 3109 page_t *pp;
3075 3110 page_t *first_pp;
3076 3111 uint64_t pgaddr;
3077 3112 ulong_t bin;
3078 3113 int mtypestart;
3079 3114 int plw_initialized;
3080 3115 page_list_walker_t plw;
3081 3116
3082 3117 VM_STAT_ADD(pga_vmstats.pgma_alloc);
3083 3118
3084 3119 ASSERT((flags & PG_MATCH_COLOR) == 0);
3085 3120 ASSERT(szc == 0);
3086 3121 ASSERT(dma_attr != NULL);
3087 3122
3088 3123 MTYPE_START(mnode, mtype, flags);
3089 3124 if (mtype < 0) {
3090 3125 VM_STAT_ADD(pga_vmstats.pgma_allocempty);
3091 3126 return (NULL);
3092 3127 }
3093 3128
3094 3129 mtypestart = mtype;
3095 3130
3096 3131 bin = origbin;
3097 3132
3098 3133 /*
3099 3134 * check up to page_colors + 1 bins - origbin may be checked twice
3100 3135 * because of BIN_STEP skip
3101 3136 */
3102 3137 do {
3103 3138 plw_initialized = 0;
3104 3139
3105 3140 for (plw.plw_count = 0;
3106 3141 plw.plw_count < page_colors; plw.plw_count++) {
3107 3142
3108 3143 if (PAGE_FREELISTS(mnode, szc, bin, mtype) == NULL)
3109 3144 goto nextfreebin;
3110 3145
3111 3146 pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
3112 3147 mutex_enter(pcm);
3113 3148 pp = PAGE_FREELISTS(mnode, szc, bin, mtype);
3114 3149 first_pp = pp;
3115 3150 while (pp != NULL) {
3116 3151 if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3117 3152 SE_EXCL) == 0) {
3118 3153 pp = pp->p_next;
3119 3154 if (pp == first_pp) {
3120 3155 pp = NULL;
3121 3156 }
3122 3157 continue;
3123 3158 }
3124 3159
3125 3160 ASSERT(PP_ISFREE(pp));
3126 3161 ASSERT(PP_ISAGED(pp));
3127 3162 ASSERT(pp->p_vnode == NULL);
3128 3163 ASSERT(pp->p_hash == NULL);
3129 3164 ASSERT(pp->p_offset == (u_offset_t)-1);
3130 3165 ASSERT(pp->p_szc == szc);
3131 3166 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3132 3167 /* check if page within DMA attributes */
3133 3168 pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3134 3169 if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3135 3170 (pgaddr + MMU_PAGESIZE - 1 <=
3136 3171 dma_attr->dma_attr_addr_hi)) {
3137 3172 break;
3138 3173 }
3139 3174
3140 3175 /* continue looking */
3141 3176 page_unlock(pp);
3142 3177 pp = pp->p_next;
3143 3178 if (pp == first_pp)
3144 3179 pp = NULL;
3145 3180
3146 3181 }
3147 3182 if (pp != NULL) {
3148 3183 ASSERT(mtype == PP_2_MTYPE(pp));
3149 3184 ASSERT(pp->p_szc == 0);
3150 3185
3151 3186 /* found a page with specified DMA attributes */
3152 3187 page_sub(&PAGE_FREELISTS(mnode, szc, bin,
3153 3188 mtype), pp);
3154 3189 page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);
3155 3190
3156 3191 if ((PP_ISFREE(pp) == 0) ||
3157 3192 (PP_ISAGED(pp) == 0)) {
3158 3193 cmn_err(CE_PANIC, "page %p is not free",
3159 3194 (void *)pp);
3160 3195 }
3161 3196
3162 3197 mutex_exit(pcm);
3163 3198 check_dma(dma_attr, pp, 1);
3164 3199 VM_STAT_ADD(pga_vmstats.pgma_allocok);
3165 3200 return (pp);
3166 3201 }
3167 3202 mutex_exit(pcm);
3168 3203 nextfreebin:
3169 3204 if (plw_initialized == 0) {
3170 3205 page_list_walk_init(szc, 0, bin, 1, 0, &plw);
3171 3206 ASSERT(plw.plw_ceq_dif == page_colors);
3172 3207 plw_initialized = 1;
3173 3208 }
3174 3209
3175 3210 if (plw.plw_do_split) {
3176 3211 pp = page_freelist_split(szc, bin, mnode,
3177 3212 mtype,
3178 3213 mmu_btop(dma_attr->dma_attr_addr_lo),
3179 3214 mmu_btop(dma_attr->dma_attr_addr_hi + 1),
3180 3215 &plw);
3181 3216 if (pp != NULL) {
3182 3217 check_dma(dma_attr, pp, 1);
3183 3218 return (pp);
3184 3219 }
3185 3220 }
3186 3221
3187 3222 bin = page_list_walk_next_bin(szc, bin, &plw);
3188 3223 }
3189 3224
3190 3225 MTYPE_NEXT(mnode, mtype, flags);
3191 3226 } while (mtype >= 0);
3192 3227
3193 3228 /* failed to find a page in the freelist; try it in the cachelist */
3194 3229
3195 3230 /* reset mtype start for cachelist search */
3196 3231 mtype = mtypestart;
3197 3232 ASSERT(mtype >= 0);
3198 3233
3199 3234 /* start with the bin of matching color */
3200 3235 bin = origbin;
3201 3236
3202 3237 do {
3203 3238 for (i = 0; i <= page_colors; i++) {
3204 3239 if (PAGE_CACHELISTS(mnode, bin, mtype) == NULL)
3205 3240 goto nextcachebin;
3206 3241 pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
3207 3242 mutex_enter(pcm);
3208 3243 pp = PAGE_CACHELISTS(mnode, bin, mtype);
3209 3244 first_pp = pp;
3210 3245 while (pp != NULL) {
3211 3246 if (IS_DUMP_PAGE(pp) || page_trylock(pp,
3212 3247 SE_EXCL) == 0) {
3213 3248 pp = pp->p_next;
3214 3249 if (pp == first_pp)
3215 3250 pp = NULL;
3216 3251 continue;
3217 3252 }
3218 3253 ASSERT(pp->p_vnode);
3219 3254 ASSERT(PP_ISAGED(pp) == 0);
3220 3255 ASSERT(pp->p_szc == 0);
3221 3256 ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
3222 3257
3223 3258 /* check if page within DMA attributes */
3224 3259
3225 3260 pgaddr = pa_to_ma(pfn_to_pa(pp->p_pagenum));
3226 3261 if ((pgaddr >= dma_attr->dma_attr_addr_lo) &&
3227 3262 (pgaddr + MMU_PAGESIZE - 1 <=
3228 3263 dma_attr->dma_attr_addr_hi)) {
3229 3264 break;
3230 3265 }
3231 3266
3232 3267 /* continue looking */
3233 3268 page_unlock(pp);
3234 3269 pp = pp->p_next;
3235 3270 if (pp == first_pp)
3236 3271 pp = NULL;
3237 3272 }
3238 3273
3239 3274 if (pp != NULL) {
3240 3275 ASSERT(mtype == PP_2_MTYPE(pp));
3241 3276 ASSERT(pp->p_szc == 0);
3242 3277
3243 3278 /* found a page with specified DMA attributes */
3244 3279 page_sub(&PAGE_CACHELISTS(mnode, bin,
3245 3280 mtype), pp);
3246 3281 page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);
3247 3282
3248 3283 mutex_exit(pcm);
3249 3284 ASSERT(pp->p_vnode);
3250 3285 ASSERT(PP_ISAGED(pp) == 0);
3251 3286 check_dma(dma_attr, pp, 1);
3252 3287 VM_STAT_ADD(pga_vmstats.pgma_allocok);
3253 3288 return (pp);
3254 3289 }
3255 3290 mutex_exit(pcm);
3256 3291 nextcachebin:
3257 3292 bin += (i == 0) ? BIN_STEP : 1;
3258 3293 bin &= page_colors_mask;
3259 3294 }
3260 3295 MTYPE_NEXT(mnode, mtype, flags);
3261 3296 } while (mtype >= 0);
3262 3297
3263 3298 VM_STAT_ADD(pga_vmstats.pgma_allocfailed);
3264 3299 return (NULL);
3265 3300 }
3266 3301
3267 3302 /*
3268 3303 * This function is similar to page_get_freelist()/page_get_cachelist()
3269 3304 * but it searches both the lists to find a page with the specified
3270 3305 * color (or no color) and DMA attributes. The search is done in the
3271 3306 * freelist first and then in the cache list within the highest memory
3272 3307 * range (based on DMA attributes) before searching in the lower
3273 3308 * memory ranges.
3274 3309 *
3275 3310 * Note: This function is called only by page_create_io().
3276 3311 */
3277 3312 /*ARGSUSED*/
3278 3313 static page_t *
3279 3314 page_get_anylist(struct vnode *vp, u_offset_t off, struct as *as, caddr_t vaddr,
3280 3315 size_t size, uint_t flags, ddi_dma_attr_t *dma_attr, lgrp_t *lgrp)
3281 3316 {
3282 3317 uint_t bin;
3283 3318 int mtype;
3284 3319 page_t *pp;
3285 3320 int n;
3286 3321 int m;
3287 3322 int szc;
3288 3323 int fullrange;
3289 3324 int mnode;
3290 3325 int local_failed_stat = 0;
3291 3326 lgrp_mnode_cookie_t lgrp_cookie;
3292 3327
3293 3328 VM_STAT_ADD(pga_vmstats.pga_alloc);
3294 3329
3295 3330 /* only base pagesize currently supported */
3296 3331 if (size != MMU_PAGESIZE)
3297 3332 return (NULL);
3298 3333
3299 3334 /*
3300 3335 * If we're passed a specific lgroup, we use it. Otherwise,
3301 3336 * assume first-touch placement is desired.
3302 3337 */
3303 3338 if (!LGRP_EXISTS(lgrp))
3304 3339 lgrp = lgrp_home_lgrp();
3305 3340
3306 3341 /* LINTED */
3307 3342 AS_2_BIN(as, seg, vp, vaddr, bin, 0);
3308 3343
3309 3344 /*
3310 3345 	 * Only hold one freelist or cachelist lock at a time; that way we
3311 3346 * can start anywhere and not have to worry about lock
3312 3347 * ordering.
3313 3348 */
3314 3349 if (dma_attr == NULL) {
3315 3350 n = mtype16m;
3316 3351 m = mtypetop;
3317 3352 fullrange = 1;
3318 3353 VM_STAT_ADD(pga_vmstats.pga_nulldmaattr);
3319 3354 } else {
3320 3355 pfn_t pfnlo = mmu_btop(dma_attr->dma_attr_addr_lo);
3321 3356 pfn_t pfnhi = mmu_btop(dma_attr->dma_attr_addr_hi);
3322 3357
3323 3358 /*
3324 3359 * We can guarantee alignment only for page boundary.
3325 3360 		 * We can guarantee alignment only to a page boundary.
3326 3361 if (dma_attr->dma_attr_align > MMU_PAGESIZE)
3327 3362 return (NULL);
3328 3363
3329 3364 /* Sanity check the dma_attr */
3330 3365 if (pfnlo > pfnhi)
3331 3366 return (NULL);
3332 3367
3333 3368 n = pfn_2_mtype(pfnlo);
3334 3369 m = pfn_2_mtype(pfnhi);
3335 3370
3336 3371 fullrange = ((pfnlo == mnoderanges[n].mnr_pfnlo) &&
3337 3372 (pfnhi >= mnoderanges[m].mnr_pfnhi));
3338 3373 }
3339 3374 VM_STAT_COND_ADD(fullrange == 0, pga_vmstats.pga_notfullrange);
3340 3375
3341 3376 szc = 0;
3342 3377
3343 3378 	/* cycling thru mtype handled by RANGE0 if n == mtype16m */
3344 3379 if (n == mtype16m) {
3345 3380 flags |= PGI_MT_RANGE0;
3346 3381 n = m;
3347 3382 }
3348 3383
3349 3384 /*
3350 3385 * Try local memory node first, but try remote if we can't
3351 3386 * get a page of the right color.
3352 3387 */
3353 3388 LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_HIER);
3354 3389 while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
3355 3390 /*
3356 3391 * allocate pages from high pfn to low.
3357 3392 */
3358 3393 mtype = m;
3359 3394 do {
3360 3395 if (fullrange != 0) {
3361 3396 pp = page_get_mnode_freelist(mnode,
3362 3397 bin, mtype, szc, flags);
3363 3398 if (pp == NULL) {
3364 3399 pp = page_get_mnode_cachelist(
3365 3400 bin, flags, mnode, mtype);
3366 3401 }
3367 3402 } else {
3368 3403 pp = page_get_mnode_anylist(bin, szc,
3369 3404 flags, mnode, mtype, dma_attr);
3370 3405 }
3371 3406 if (pp != NULL) {
3372 3407 VM_STAT_ADD(pga_vmstats.pga_allocok);
3373 3408 check_dma(dma_attr, pp, 1);
3374 3409 return (pp);
3375 3410 }
3376 3411 } while (mtype != n &&
3377 3412 (mtype = mnoderanges[mtype].mnr_next) != -1);
3378 3413 if (!local_failed_stat) {
3379 3414 lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);
3380 3415 local_failed_stat = 1;
3381 3416 }
3382 3417 }
3383 3418 VM_STAT_ADD(pga_vmstats.pga_allocfailed);
3384 3419
3385 3420 return (NULL);
3386 3421 }
3387 3422
3388 3423 /*
3389 3424 * page_create_io()
3390 3425 *
3391 3426 * This function is a copy of page_create_va() with an additional
3392 3427 * argument 'mattr' that specifies DMA memory requirements to
3393 3428 * the page list functions. This function is used by the segkmem
3394 3429  * allocator, so it is only used to create new pages (i.e. PG_EXCL is
3395 3430 * set).
3396 3431 *
3397 3432 * Note: This interface is currently used by x86 PSM only and is
3398 3433 * not fully specified so the commitment level is only for
3399 3434 * private interface specific to x86. This interface uses PSM
3400 3435 * specific page_get_anylist() interface.
3401 3436 */
3402 3437
3403 3438 #define PAGE_HASH_SEARCH(index, pp, vp, off) { \
3404 3439 for ((pp) = page_hash[(index)]; (pp); (pp) = (pp)->p_hash) { \
3405 3440 if ((pp)->p_vnode == (vp) && (pp)->p_offset == (off)) \
3406 3441 break; \
3407 3442 } \
3408 3443 }
3409 3444
3410 3445
3411 3446 page_t *
3412 3447 page_create_io(
3413 3448 struct vnode *vp,
3414 3449 u_offset_t off,
3415 3450 uint_t bytes,
3416 3451 uint_t flags,
3417 3452 struct as *as,
3418 3453 caddr_t vaddr,
3419 3454 ddi_dma_attr_t *mattr) /* DMA memory attributes if any */
3420 3455 {
3421 3456 page_t *plist = NULL;
3422 3457 uint_t plist_len = 0;
3423 3458 pgcnt_t npages;
3424 3459 page_t *npp = NULL;
3425 3460 uint_t pages_req;
3426 3461 page_t *pp;
3427 3462 kmutex_t *phm = NULL;
3428 3463 uint_t index;
3429 3464
3430 3465 TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
3431 3466 "page_create_start:vp %p off %llx bytes %u flags %x",
3432 3467 vp, off, bytes, flags);
3433 3468
3434 3469 ASSERT((flags & ~(PG_EXCL | PG_WAIT | PG_PHYSCONTIG)) == 0);
3435 3470
3436 3471 pages_req = npages = mmu_btopr(bytes);
3437 3472
3438 3473 /*
3439 3474 * Do the freemem and pcf accounting.
3440 3475 */
3441 3476 if (!page_create_wait(npages, flags)) {
3442 3477 return (NULL);
3443 3478 }
3444 3479
3445 3480 TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
3446 3481 "page_create_success:vp %p off %llx", vp, off);
3447 3482
3448 3483 /*
3449 3484 * If satisfying this request has left us with too little
3450 3485 * memory, start the wheels turning to get some back. The
3451 3486 * first clause of the test prevents waking up the pageout
3452 3487 * daemon in situations where it would decide that there's
3453 3488 * nothing to do.
3454 3489 */
3455 3490 if (nscan < desscan && freemem < minfree) {
3456 3491 TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
3457 3492 "pageout_cv_signal:freemem %ld", freemem);
3458 3493 cv_signal(&proc_pageout->p_cv);
3459 3494 }
3460 3495
3461 3496 if (flags & PG_PHYSCONTIG) {
3462 3497
3463 3498 plist = page_get_contigpage(&npages, mattr, 1);
3464 3499 if (plist == NULL) {
3465 3500 page_create_putback(npages);
3466 3501 return (NULL);
3467 3502 }
3468 3503
3469 3504 pp = plist;
3470 3505
3471 3506 do {
3472 3507 if (!page_hashin(pp, vp, off, NULL)) {
3473 3508 panic("pg_creat_io: hashin failed %p %p %llx",
3474 3509 (void *)pp, (void *)vp, off);
3475 3510 }
3476 3511 VM_STAT_ADD(page_create_new);
3477 3512 off += MMU_PAGESIZE;
3478 3513 PP_CLRFREE(pp);
3479 3514 PP_CLRAGED(pp);
3480 3515 page_set_props(pp, P_REF);
3481 3516 pp = pp->p_next;
3482 3517 } while (pp != plist);
3483 3518
3484 3519 if (!npages) {
3485 3520 check_dma(mattr, plist, pages_req);
3486 3521 return (plist);
3487 3522 } else {
3488 3523 vaddr += (pages_req - npages) << MMU_PAGESHIFT;
3489 3524 }
3490 3525
3491 3526 /*
3492 3527 * fall-thru:
3493 3528 *
3494 3529 * page_get_contigpage returns when npages <= sgllen.
3495 3530 * Grab the rest of the non-contig pages below from anylist.
3496 3531 */
3497 3532 }
3498 3533
3499 3534 /*
3500 3535 * Loop around collecting the requested number of pages.
3501 3536 * Most of the time, we have to `create' a new page. With
3502 3537 * this in mind, pull the page off the free list before
3503 3538 * getting the hash lock. This will minimize the hash
3504 3539 * lock hold time, nesting, and the like. If it turns
3505 3540 * out we don't need the page, we put it back at the end.
3506 3541 */
3507 3542 while (npages--) {
3508 3543 phm = NULL;
3509 3544
3510 3545 index = PAGE_HASH_FUNC(vp, off);
3511 3546 top:
3512 3547 ASSERT(phm == NULL);
3513 3548 ASSERT(index == PAGE_HASH_FUNC(vp, off));
3514 3549 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3515 3550
3516 3551 if (npp == NULL) {
3517 3552 /*
3518 3553 * Try to get the page of any color either from
3519 3554 * the freelist or from the cache list.
3520 3555 */
3521 3556 npp = page_get_anylist(vp, off, as, vaddr, MMU_PAGESIZE,
3522 3557 flags & ~PG_MATCH_COLOR, mattr, NULL);
3523 3558 if (npp == NULL) {
3524 3559 if (mattr == NULL) {
3525 3560 /*
3526 3561 * Not looking for a special page;
3527 3562 * panic!
3528 3563 */
3529 3564 panic("no page found %d", (int)npages);
3530 3565 }
3531 3566 /*
3532 3567 * No page found! This can happen
3533 3568 * if we are looking for a page
3534 3569 * within a specific memory range
3535 3570 * for DMA purposes. If PG_WAIT is
3536 3571 * specified then we wait for a
3537 3572 * while and then try again. The
3538 3573 * wait could be forever if we
3539 3574 * don't get the page(s) we need.
3540 3575 *
3541 3576 * Note: XXX We really need a mechanism
3542 3577 * to wait for pages in the desired
3543 3578 * range. For now, we wait for any
3544 3579 			 * page and see if we can use it.
3545 3580 */
3546 3581
3547 3582 if ((mattr != NULL) && (flags & PG_WAIT)) {
3548 3583 delay(10);
3549 3584 goto top;
3550 3585 }
3551 3586 goto fail; /* undo accounting stuff */
3552 3587 }
3553 3588
3554 3589 if (PP_ISAGED(npp) == 0) {
3555 3590 /*
3556 3591 * Since this page came from the
3557 3592 * cachelist, we must destroy the
3558 3593 * old vnode association.
3559 3594 */
3560 3595 page_hashout(npp, (kmutex_t *)NULL);
3561 3596 }
3562 3597 }
3563 3598
3564 3599 /*
3565 3600 * We own this page!
3566 3601 */
3567 3602 ASSERT(PAGE_EXCL(npp));
3568 3603 ASSERT(npp->p_vnode == NULL);
3569 3604 ASSERT(!hat_page_is_mapped(npp));
3570 3605 PP_CLRFREE(npp);
3571 3606 PP_CLRAGED(npp);
3572 3607
3573 3608 /*
3574 3609 		 * Here we have a page in our hot little mitts and are
3575 3610 * just waiting to stuff it on the appropriate lists.
3576 3611 * Get the mutex and check to see if it really does
3577 3612 * not exist.
3578 3613 */
3579 3614 phm = PAGE_HASH_MUTEX(index);
3580 3615 mutex_enter(phm);
3581 3616 PAGE_HASH_SEARCH(index, pp, vp, off);
3582 3617 if (pp == NULL) {
3583 3618 VM_STAT_ADD(page_create_new);
3584 3619 pp = npp;
3585 3620 npp = NULL;
3586 3621 if (!page_hashin(pp, vp, off, phm)) {
3587 3622 /*
3588 3623 * Since we hold the page hash mutex and
3589 3624 * just searched for this page, page_hashin
3590 3625 * had better not fail. If it does, that
3591 3626 			 * means some thread did not follow the
3592 3627 * page hash mutex rules. Panic now and
3593 3628 * get it over with. As usual, go down
3594 3629 * holding all the locks.
3595 3630 */
3596 3631 ASSERT(MUTEX_HELD(phm));
3597 3632 panic("page_create: hashin fail %p %p %llx %p",
3598 3633 (void *)pp, (void *)vp, off, (void *)phm);
3599 3634
3600 3635 }
3601 3636 ASSERT(MUTEX_HELD(phm));
3602 3637 mutex_exit(phm);
3603 3638 phm = NULL;
3604 3639
3605 3640 /*
3606 3641 * Hat layer locking need not be done to set
3607 3642 * the following bits since the page is not hashed
3608 3643 * and was on the free list (i.e., had no mappings).
3609 3644 *
3610 3645 * Set the reference bit to protect
3611 3646 * against immediate pageout
3612 3647 *
3613 3648 * XXXmh modify freelist code to set reference
3614 3649 * bit so we don't have to do it here.
3615 3650 */
3616 3651 page_set_props(pp, P_REF);
3617 3652 } else {
3618 3653 ASSERT(MUTEX_HELD(phm));
3619 3654 mutex_exit(phm);
3620 3655 phm = NULL;
3621 3656 /*
3622 3657 * NOTE: This should not happen for pages associated
3623 3658 * with kernel vnode 'kvp'.
3624 3659 */
3625 3660 /* XX64 - to debug why this happens! */
3626 3661 ASSERT(!VN_ISKAS(vp));
3627 3662 if (VN_ISKAS(vp))
3628 3663 cmn_err(CE_NOTE,
3629 3664 "page_create: page not expected "
3630 3665 "in hash list for kernel vnode - pp 0x%p",
3631 3666 (void *)pp);
3632 3667 VM_STAT_ADD(page_create_exists);
3633 3668 goto fail;
3634 3669 }
3635 3670
3636 3671 /*
3637 3672 * Got a page! It is locked. Acquire the i/o
3638 3673 * lock since we are going to use the p_next and
3639 3674 * p_prev fields to link the requested pages together.
3640 3675 */
3641 3676 page_io_lock(pp);
3642 3677 page_add(&plist, pp);
3643 3678 plist = plist->p_next;
3644 3679 off += MMU_PAGESIZE;
3645 3680 vaddr += MMU_PAGESIZE;
3646 3681 }
3647 3682
3648 3683 check_dma(mattr, plist, pages_req);
3649 3684 return (plist);
3650 3685
3651 3686 fail:
3652 3687 if (npp != NULL) {
3653 3688 /*
3654 3689 * Did not need this page after all.
3655 3690 * Put it back on the free list.
3656 3691 */
3657 3692 VM_STAT_ADD(page_create_putbacks);
3658 3693 PP_SETFREE(npp);
3659 3694 PP_SETAGED(npp);
3660 3695 npp->p_offset = (u_offset_t)-1;
3661 3696 page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
3662 3697 page_unlock(npp);
3663 3698 }
3664 3699
3665 3700 /*
3666 3701 * Give up the pages we already got.
3667 3702 */
3668 3703 while (plist != NULL) {
3669 3704 pp = plist;
3670 3705 page_sub(&plist, pp);
3671 3706 page_io_unlock(pp);
3672 3707 plist_len++;
3673 3708 /*LINTED: constant in conditional ctx*/
3674 3709 VN_DISPOSE(pp, B_INVAL, 0, kcred);
3675 3710 }
3676 3711
3677 3712 /*
3678 3713 * VN_DISPOSE does freemem accounting for the pages in plist
3679 3714 * by calling page_free. So, we need to undo the pcf accounting
3680 3715 * for only the remaining pages.
3681 3716 */
3682 3717 VM_STAT_ADD(page_create_putbacks);
3683 3718 page_create_putback(pages_req - plist_len);
3684 3719
3685 3720 return (NULL);
3686 3721 }
3687 3722 #endif /* !__xpv */
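As a point of reference for reviewers, a minimal caller sketch follows. It is illustrative only and not part of this change: the helper name and every constraint value are hypothetical, and it assumes only the standard ddi_dma_attr(9S) fields already visible via the sys/ddidmareq.h include above.

	/*
	 * Hedged sketch, not part of this webrev: one plausible way a
	 * PSM-level caller could ask page_create_io() for physically
	 * contiguous pages below 4 GiB.  All names and values here are
	 * hypothetical.
	 */
	static page_t *
	example_get_low_pages(struct vnode *vp, u_offset_t off, uint_t bytes)
	{
		ddi_dma_attr_t attr;

		bzero(&attr, sizeof (attr));
		attr.dma_attr_version = DMA_ATTR_V0;
		attr.dma_attr_addr_lo = 0;		/* no lower PA bound */
		attr.dma_attr_addr_hi = 0xffffffffULL;	/* stay below 4 GiB */
		attr.dma_attr_count_max = 0xffffffffULL;
		attr.dma_attr_align = MMU_PAGESIZE;
		attr.dma_attr_burstsizes = 1;
		attr.dma_attr_minxfer = 1;
		attr.dma_attr_maxxfer = 0xffffffffULL;
		attr.dma_attr_seg = 0xffffffffULL;
		attr.dma_attr_sgllen = 1;		/* one contiguous run */
		attr.dma_attr_granular = 1;

		/*
		 * PG_PHYSCONTIG routes the request through
		 * page_get_contigpage(); PG_WAIT is omitted so a request
		 * that cannot be satisfied fails instead of waiting.
		 * &kas and the NULL vaddr are only placement hints.
		 */
		return (page_create_io(vp, off, bytes,
		    PG_EXCL | PG_PHYSCONTIG, &kas, NULL, &attr));
	}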
3688 3723
3689 3724
3690 3725 /*
3691 3726 * Copy the data from the physical page represented by "frompp" to
3692 3727 * that represented by "topp". ppcopy uses CPU->cpu_caddr1 and
3693 3728 * CPU->cpu_caddr2. It assumes that no one uses either map at interrupt
3694 3729 * level and no one sleeps with an active mapping there.
3695 3730 *
3696 3731 * Note that the ref/mod bits in the page_t's are not affected by
3697 3732 * this operation, hence it is up to the caller to update them appropriately.
3698 3733 */
3699 3734 int
3700 3735 ppcopy(page_t *frompp, page_t *topp)
3701 3736 {
3702 3737 caddr_t pp_addr1;
3703 3738 caddr_t pp_addr2;
3704 3739 hat_mempte_t pte1;
3705 3740 hat_mempte_t pte2;
3706 3741 kmutex_t *ppaddr_mutex;
3707 3742 label_t ljb;
3708 3743 int ret = 1;
3709 3744
3710 3745 ASSERT_STACK_ALIGNED();
3711 3746 ASSERT(PAGE_LOCKED(frompp));
3712 3747 ASSERT(PAGE_LOCKED(topp));
3713 3748
3714 3749 if (kpm_enable) {
3715 3750 pp_addr1 = hat_kpm_page2va(frompp, 0);
3716 3751 pp_addr2 = hat_kpm_page2va(topp, 0);
3717 3752 kpreempt_disable();
3718 3753 } else {
3719 3754 /*
3720 3755 		 * disable preemption so that the CPU can't change
3721 3756 */
3722 3757 kpreempt_disable();
3723 3758
3724 3759 pp_addr1 = CPU->cpu_caddr1;
3725 3760 pp_addr2 = CPU->cpu_caddr2;
3726 3761 pte1 = CPU->cpu_caddr1pte;
3727 3762 pte2 = CPU->cpu_caddr2pte;
3728 3763
3729 3764 ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3730 3765 mutex_enter(ppaddr_mutex);
3731 3766
3732 3767 hat_mempte_remap(page_pptonum(frompp), pp_addr1, pte1,
3733 3768 PROT_READ | HAT_STORECACHING_OK, HAT_LOAD_NOCONSIST);
3734 3769 hat_mempte_remap(page_pptonum(topp), pp_addr2, pte2,
3735 3770 PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3736 3771 HAT_LOAD_NOCONSIST);
3737 3772 }
3738 3773
3739 3774 if (on_fault(&ljb)) {
3740 3775 ret = 0;
3741 3776 goto faulted;
3742 3777 }
3743 3778 if (use_sse_pagecopy)
3744 3779 #ifdef __xpv
3745 3780 page_copy_no_xmm(pp_addr2, pp_addr1);
3746 3781 #else
3747 3782 hwblkpagecopy(pp_addr1, pp_addr2);
3748 3783 #endif
3749 3784 else
3750 3785 bcopy(pp_addr1, pp_addr2, PAGESIZE);
3751 3786
3752 3787 no_fault();
3753 3788 faulted:
3754 3789 if (!kpm_enable) {
3755 3790 #ifdef __xpv
3756 3791 /*
3757 3792 * We can't leave unused mappings laying about under the
3758 3793 * hypervisor, so blow them away.
3759 3794 */
3760 3795 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr1, 0,
3761 3796 UVMF_INVLPG | UVMF_LOCAL) < 0)
3762 3797 panic("HYPERVISOR_update_va_mapping() failed");
3763 3798 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3764 3799 UVMF_INVLPG | UVMF_LOCAL) < 0)
3765 3800 panic("HYPERVISOR_update_va_mapping() failed");
3766 3801 #endif
3767 3802 mutex_exit(ppaddr_mutex);
3768 3803 }
3769 3804 kpreempt_enable();
3770 3805 return (ret);
3771 3806 }
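For reviewers less familiar with this corner of the VM code, a hedged usage sketch follows; the helper is hypothetical, and only the locking requirement and the return-value convention are taken from the function above.

	/*
	 * Hedged sketch, not part of this webrev.  Both pages must already
	 * be locked by the caller; a zero return from ppcopy() means the
	 * copy faulted (e.g. on an uncorrectable memory error), so the
	 * destination contents must not be trusted.
	 */
	static int
	example_copy_page(page_t *src, page_t *dst)
	{
		ASSERT(PAGE_LOCKED(src));
		ASSERT(PAGE_LOCKED(dst));

		if (ppcopy(src, dst) == 0)
			return (EIO);	/* copy faulted; dst is undefined */

		/*
		 * ppcopy() leaves the ref/mod bits alone, so a real caller
		 * that cares would update them here via the hat layer.
		 */
		return (0);
	}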
3772 3807
3773 3808 void
3774 3809 pagezero(page_t *pp, uint_t off, uint_t len)
3775 3810 {
3776 3811 ASSERT(PAGE_LOCKED(pp));
3777 3812 pfnzero(page_pptonum(pp), off, len);
3778 3813 }
3779 3814
3780 3815 /*
3781 3816 * Zero the physical page from off to off + len given by pfn
3782 3817 * without changing the reference and modified bits of page.
3783 3818 *
3784 3819  * We do this using CPU private page address #2; see ppcopy() for more info.
3785 3820 * pfnzero() must not be called at interrupt level.
3786 3821 */
3787 3822 void
3788 3823 pfnzero(pfn_t pfn, uint_t off, uint_t len)
3789 3824 {
3790 3825 caddr_t pp_addr2;
3791 3826 hat_mempte_t pte2;
3792 3827 kmutex_t *ppaddr_mutex = NULL;
3793 3828
3794 3829 ASSERT_STACK_ALIGNED();
3795 3830 ASSERT(len <= MMU_PAGESIZE);
3796 3831 ASSERT(off <= MMU_PAGESIZE);
3797 3832 ASSERT(off + len <= MMU_PAGESIZE);
3798 3833
3799 3834 if (kpm_enable && !pfn_is_foreign(pfn)) {
3800 3835 pp_addr2 = hat_kpm_pfn2va(pfn);
3801 3836 kpreempt_disable();
3802 3837 } else {
3803 3838 kpreempt_disable();
3804 3839
3805 3840 pp_addr2 = CPU->cpu_caddr2;
3806 3841 pte2 = CPU->cpu_caddr2pte;
3807 3842
3808 3843 ppaddr_mutex = &CPU->cpu_ppaddr_mutex;
3809 3844 mutex_enter(ppaddr_mutex);
3810 3845
3811 3846 hat_mempte_remap(pfn, pp_addr2, pte2,
3812 3847 PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
3813 3848 HAT_LOAD_NOCONSIST);
3814 3849 }
3815 3850
3816 3851 if (use_sse_pagezero) {
3817 3852 #ifdef __xpv
3818 3853 uint_t rem;
3819 3854
3820 3855 /*
3821 3856 * zero a byte at a time until properly aligned for
3822 3857 * block_zero_no_xmm().
3823 3858 */
3824 3859 while (!P2NPHASE(off, ((uint_t)BLOCKZEROALIGN)) && len-- > 0)
3825 3860 pp_addr2[off++] = 0;
3826 3861
3827 3862 /*
3828 3863 * Now use faster block_zero_no_xmm() for any range
3829 3864 * that is properly aligned and sized.
3830 3865 */
3831 3866 rem = P2PHASE(len, ((uint_t)BLOCKZEROALIGN));
3832 3867 len -= rem;
3833 3868 if (len != 0) {
3834 3869 block_zero_no_xmm(pp_addr2 + off, len);
3835 3870 off += len;
3836 3871 }
3837 3872
3838 3873 /*
3839 3874 * zero remainder with byte stores.
3840 3875 */
3841 3876 while (rem-- > 0)
3842 3877 pp_addr2[off++] = 0;
3843 3878 #else
3844 3879 hwblkclr(pp_addr2 + off, len);
3845 3880 #endif
3846 3881 } else {
3847 3882 bzero(pp_addr2 + off, len);
3848 3883 }
3849 3884
3850 3885 if (!kpm_enable || pfn_is_foreign(pfn)) {
3851 3886 #ifdef __xpv
3852 3887 /*
3853 3888 * On the hypervisor this page might get used for a page
3854 3889 * table before any intervening change to this mapping,
3855 3890 * so blow it away.
3856 3891 */
3857 3892 if (HYPERVISOR_update_va_mapping((uintptr_t)pp_addr2, 0,
3858 3893 UVMF_INVLPG) < 0)
3859 3894 panic("HYPERVISOR_update_va_mapping() failed");
3860 3895 #endif
3861 3896 mutex_exit(ppaddr_mutex);
3862 3897 }
3863 3898
3864 3899 kpreempt_enable();
3865 3900 }
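The byte-at-a-time head and tail loops in the __xpv branch above are driven by the P2NPHASE()/P2PHASE() macros from sys/sysmacros.h. The worked numbers below assume a 64-byte block alignment purely for illustration; BLOCKZEROALIGN's actual value is not shown in this webrev.

	/*
	 * Illustrative arithmetic, assuming a 64-byte alignment:
	 *
	 *   P2NPHASE(off, 64) == (-off & 63): bytes from 'off' up to the
	 *       next 64-byte boundary, e.g. P2NPHASE(100, 64) = 28
	 *       (100 + 28 = 128, which is aligned).
	 *   P2PHASE(len, 64)  == (len & 63):  bytes left over after the
	 *       last full 64-byte block, e.g. P2PHASE(300, 64) = 44
	 *       (300 - 44 = 256, a whole number of blocks).
	 *
	 * Per the comments above, the head and tail counts are zeroed with
	 * byte stores and the aligned middle goes to block_zero_no_xmm().
	 */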
3866 3901
3867 3902 /*
3868 3903 * Platform-dependent page scrub call.
3869 3904 */
3870 3905 void
3871 3906 pagescrub(page_t *pp, uint_t off, uint_t len)
3872 3907 {
3873 3908 /*
3874 3909 * For now, we rely on the fact that pagezero() will
3875 3910 * always clear UEs.
3876 3911 */
3877 3912 pagezero(pp, off, len);
3878 3913 }
3879 3914
3880 3915 /*
3881 3916  * set up two private addresses on a given CPU for use in ppcopy()
3882 3917 */
3883 3918 void
3884 3919 setup_vaddr_for_ppcopy(struct cpu *cpup)
3885 3920 {
3886 3921 void *addr;
3887 3922 hat_mempte_t pte_pa;
3888 3923
3889 3924 addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3890 3925 pte_pa = hat_mempte_setup(addr);
3891 3926 cpup->cpu_caddr1 = addr;
3892 3927 cpup->cpu_caddr1pte = pte_pa;
3893 3928
3894 3929 addr = vmem_alloc(heap_arena, mmu_ptob(1), VM_SLEEP);
3895 3930 pte_pa = hat_mempte_setup(addr);
3896 3931 cpup->cpu_caddr2 = addr;
3897 3932 cpup->cpu_caddr2pte = pte_pa;
3898 3933
3899 3934 mutex_init(&cpup->cpu_ppaddr_mutex, NULL, MUTEX_DEFAULT, NULL);
3900 3935 }
3901 3936
3902 3937 /*
3903 3938 * Undo setup_vaddr_for_ppcopy
3904 3939 */
3905 3940 void
3906 3941 teardown_vaddr_for_ppcopy(struct cpu *cpup)
3907 3942 {
3908 3943 mutex_destroy(&cpup->cpu_ppaddr_mutex);
3909 3944
3910 3945 hat_mempte_release(cpup->cpu_caddr2, cpup->cpu_caddr2pte);
3911 3946 cpup->cpu_caddr2pte = 0;
3912 3947 vmem_free(heap_arena, cpup->cpu_caddr2, mmu_ptob(1));
3913 3948 cpup->cpu_caddr2 = 0;
3914 3949
3915 3950 hat_mempte_release(cpup->cpu_caddr1, cpup->cpu_caddr1pte);
3916 3951 cpup->cpu_caddr1pte = 0;
3917 3952 vmem_free(heap_arena, cpup->cpu_caddr1, mmu_ptob(1));
3918 3953 cpup->cpu_caddr1 = 0;
3919 3954 }
3920 3955
3921 3956 /*
3922 3957 * Function for flushing D-cache when performing module relocations
3923 3958 * to an alternate mapping. Unnecessary on Intel / AMD platforms.
3924 3959 */
3925 3960 void
3926 3961 dcache_flushall()
3927 3962 {}
3928 3963
3929 -size_t
3930 -exec_get_spslew(void)
3931 -{
3932 - return (0);
3933 -}
3934 -
3935 3964 /*
3936 3965 * Allocate a memory page. The argument 'seed' can be any pseudo-random
3937 3966 * number to vary where the pages come from. This is quite a hacked up
3938 3967 * method -- it works for now, but really needs to be fixed up a bit.
3939 3968 *
3940 3969 * We currently use page_create_va() on the kvp with fake offsets,
3941 3970 * segments and virt address. This is pretty bogus, but was copied from the
3942 3971 * old hat_i86.c code. A better approach would be to specify either mnode
3943 3972  * random or mnode local and take a page from whatever color has the MOST
3944 3973 * available - this would have a minimal impact on page coloring.
3945 3974 */
3946 3975 page_t *
3947 3976 page_get_physical(uintptr_t seed)
3948 3977 {
3949 3978 page_t *pp;
3950 3979 u_offset_t offset;
3951 3980 static struct seg tmpseg;
3952 3981 static uintptr_t ctr = 0;
3953 3982
3954 3983 /*
3955 3984 * This code is gross, we really need a simpler page allocator.
3956 3985 *
3957 3986 	 * We need to assign an offset for the page to call page_create_va().
3958 3987 	 * To avoid conflicts with other pages, we get creative with the offset.
3959 3988 	 * For 32 bits, we need an offset > 4 Gig.
3960 3989 	 * For 64 bits, we need an offset somewhere in the VA hole.
3961 3990 */
3962 3991 offset = seed;
3963 3992 if (offset > kernelbase)
3964 3993 offset -= kernelbase;
3965 3994 offset <<= MMU_PAGESHIFT;
3966 3995 #if defined(__amd64)
3967 3996 offset += mmu.hole_start; /* something in VA hole */
3968 3997 #else
3969 3998 offset += 1ULL << 40; /* something > 4 Gig */
3970 3999 #endif
3971 4000
3972 4001 if (page_resv(1, KM_NOSLEEP) == 0)
3973 4002 return (NULL);
3974 4003
3975 4004 #ifdef DEBUG
3976 4005 pp = page_exists(&kvp, offset);
3977 4006 if (pp != NULL)
3978 4007 panic("page already exists %p", (void *)pp);
3979 4008 #endif
3980 4009
3981 4010 pp = page_create_va(&kvp, offset, MMU_PAGESIZE, PG_EXCL,
3982 4011 &tmpseg, (caddr_t)(ctr += MMU_PAGESIZE)); /* changing VA usage */
3983 4012 if (pp != NULL) {
3984 4013 page_io_unlock(pp);
3985 4014 page_downgrade(pp);
3986 4015 }
3987 4016 return (pp);
3988 4017 }
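Because the fake-offset trick above is easy to misread, the arithmetic is worked through below with assumed values; the seed and the hole_start constant are illustrative and not taken from this change.

	/*
	 * Illustrative arithmetic for a 64-bit kernel, assuming a small
	 * seed of 0x1234 (so the kernelbase adjustment does not apply) and
	 * an assumed mmu.hole_start of 0x800000000000:
	 *
	 *   offset  = 0x1234 << MMU_PAGESHIFT	= 0x0000000001234000
	 *   offset += mmu.hole_start		= 0x0000800001234000
	 *
	 * The resulting <kvp, offset> pair lands in an offset range that
	 * legitimate kernel mappings on kvp are not expected to use, which
	 * is how collisions with existing kvp pages are meant to be avoided.
	 */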