1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
  28  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  29  */
  30 
  31 /*
  32  * Big Theory Statement for the virtual memory allocator.
  33  *
  34  * For a more complete description of the main ideas, see:
  35  *
  36  *      Jeff Bonwick and Jonathan Adams,
  37  *
  38  *      Magazines and vmem: Extending the Slab Allocator to Many CPUs and
  39  *      Arbitrary Resources.
  40  *
  41  *      Proceedings of the 2001 Usenix Conference.
  42  *      Available as http://www.usenix.org/event/usenix01/bonwick.html
  43  *
  44  * Section 1, below, is also the primary contents of vmem(9).  If for some
  45  * reason you are updating this comment, you will also wish to update the
  46  * manual.
  47  *
  48  * 1. General Concepts
  49  * -------------------
  50  *
  51  * 1.1 Overview
  52  * ------------
  53  * We divide the kernel address space into a number of logically distinct
  54  * pieces, or *arenas*: text, data, heap, stack, and so on.  Within these
  55  * arenas we often subdivide further; for example, we use heap addresses
  56  * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
  57  * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
  58  * The kernel address space, therefore, is most accurately described as
  59  * a tree of arenas in which each node of the tree *imports* some subset
  60  * of its parent.  The virtual memory allocator manages these arenas and
  61  * supports their natural hierarchical structure.
  62  *
  63  * 1.2 Arenas
  64  * ----------
  65  * An arena is nothing more than a set of integers.  These integers most
  66  * commonly represent virtual addresses, but in fact they can represent
  67  * anything at all.  For example, we could use an arena containing the
  68  * integers minpid through maxpid to allocate process IDs.  vmem_create()
  69  * and vmem_destroy() create and destroy vmem arenas.  In order to
   70  * differentiate between arenas used for addresses and arenas used for
  71  * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create().  This
  72  * prevents identifier exhaustion from being diagnosed as general memory
  73  * failure.
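 *
 * A minimal sketch of an identifier arena (values illustrative; this assumes
 * the usual vmem_create() argument order of name, base, size, quantum,
 * afunc, ffunc, source, qcache_max, vmflag):
 *
 *	pid_arena = vmem_create("pid", (void *)minpid, maxpid - minpid + 1,
 *	    1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *	pid = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1, VM_SLEEP);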
  74  *
  75  * 1.3 Spans
  76  * ---------
  77  * We represent the integers in an arena as a collection of *spans*, or
  78  * contiguous ranges of integers.  For example, the kernel heap consists
  79  * of just one span: [kernelheap, ekernelheap).  Spans can be added to an
  80  * arena in two ways: explicitly, by vmem_add(), or implicitly, by
  81  * importing, as described in Section 1.5 below.
  82  *
  83  * 1.4 Segments
  84  * ------------
  85  * Spans are subdivided into *segments*, each of which is either allocated
  86  * or free.  A segment, like a span, is a contiguous range of integers.
  87  * Each allocated segment [addr, addr + size) represents exactly one
  88  * vmem_alloc(size) that returned addr.  Free segments represent the space
  89  * between allocated segments.  If two free segments are adjacent, we
  90  * coalesce them into one larger segment; that is, if segments [a, b) and
  91  * [b, c) are both free, we merge them into a single segment [a, c).
  92  * The segments within a span are linked together in increasing-address order
  93  * so we can easily determine whether coalescing is possible.
  94  *
  95  * Segments never cross span boundaries.  When all segments within
  96  * an imported span become free, we return the span to its source.
  97  *
  98  * 1.5 Imported Memory
  99  * -------------------
 100  * As mentioned in the overview, some arenas are logical subsets of
 101  * other arenas.  For example, kmem_va_arena (a virtual address cache
 102  * that satisfies most kmem_slab_create() requests) is just a subset
 103  * of heap_arena (the kernel heap) that provides caching for the most
 104  * common slab sizes.  When kmem_va_arena runs out of virtual memory,
 105  * it *imports* more from the heap; we say that heap_arena is the
 106  * *vmem source* for kmem_va_arena.  vmem_create() allows you to
 107  * specify any existing vmem arena as the source for your new arena.
 108  * Topologically, since every arena is a child of at most one source,
 109  * the set of all arenas forms a collection of trees.
 110  *
 111  * 1.6 Constrained Allocations
 112  * ---------------------------
 113  * Some vmem clients are quite picky about the kind of address they want.
 114  * For example, the DVMA code may need an address that is at a particular
 115  * phase with respect to some alignment (to get good cache coloring), or
 116  * that lies within certain limits (the addressable range of a device),
 117  * or that doesn't cross some boundary (a DMA counter restriction) --
 118  * or all of the above.  vmem_xalloc() allows the client to specify any
 119  * or all of these constraints.
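 *
 * A minimal sketch (values illustrative, and assumed to be multiples of the
 * arena's quantum): 8K that is 64K-aligned, lies below 16MB, and does not
 * cross a 1MB boundary:
 *
 *	addr = vmem_xalloc(some_arena, 8192, 65536, 0, 0x100000,
 *	    NULL, (void *)0x1000000, VM_SLEEP);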
 120  *
 121  * 1.7 The Vmem Quantum
 122  * --------------------
 123  * Every arena has a notion of 'quantum', specified at vmem_create() time,
 124  * that defines the arena's minimum unit of currency.  Most commonly the
 125  * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
 126  * All vmem allocations are guaranteed to be quantum-aligned.
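 *
 * For example, with a 4K quantum, vmem_alloc(vmp, 5000, VM_SLEEP) returns a
 * 4K-aligned address and reserves P2ROUNDUP(5000, 4096) = 8192 bytes of the
 * arena until the segment is freed.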
 127  *
 128  * 1.8 Quantum Caching
 129  * -------------------
 130  * A vmem arena may be so hot (frequently used) that the scalability of vmem
 131  * allocation is a significant concern.  We address this by allowing the most
 132  * common allocation sizes to be serviced by the kernel memory allocator,
 133  * which provides low-latency per-cpu caching.  The qcache_max argument to
 134  * vmem_create() specifies the largest allocation size to cache.
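 *
 * A minimal sketch (the kmem_va_arena case from 1.5; values illustrative):
 *
 *	kmem_va_arena = vmem_create("kmem_va", NULL, 0, PAGESIZE,
 *	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, VM_SLEEP);
 *
 * Allocations of up to qcache_max bytes from this arena are then satisfied
 * by per-cpu kmem caches instead of the arena's segment lists.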
 135  *
 136  * 1.9 Relationship to Kernel Memory Allocator
 137  * -------------------------------------------
 138  * Every kmem cache has a vmem arena as its slab supplier.  The kernel memory
 139  * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
 140  *
 141  *
 142  * 2. Implementation
 143  * -----------------
 144  *
 145  * 2.1 Segment lists and markers
 146  * -----------------------------
 147  * The segment structure (vmem_seg_t) contains two doubly-linked lists.
 148  *
 149  * The arena list (vs_anext/vs_aprev) links all segments in the arena.
 150  * In addition to the allocated and free segments, the arena contains
 151  * special marker segments at span boundaries.  Span markers simplify
 152  * coalescing and importing logic by making it easy to tell both when
 153  * we're at a span boundary (so we don't coalesce across it), and when
 154  * a span is completely free (its neighbors will both be span markers).
 155  *
 156  * Imported spans will have vs_import set.
 157  *
 158  * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
 159  * (1) for allocated segments, vs_knext is the hash chain linkage;
 160  * (2) for free segments, vs_knext is the freelist linkage;
 161  * (3) for span marker segments, vs_knext is the next span marker.
 162  *
 163  * 2.2 Allocation hashing
 164  * ----------------------
 165  * We maintain a hash table of all allocated segments, hashed by address.
 166  * This allows vmem_free() to discover the target segment in constant time.
 167  * vmem_update() periodically resizes hash tables to keep hash chains short.
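 *
 * A minimal sketch of the lookup done on behalf of vmem_free() (it mirrors
 * vmem_hash_delete() below):
 *
 *	prev_vspp = VMEM_HASH(vmp, addr);
 *	while ((vsp = *prev_vspp) != NULL && vsp->vs_start != addr)
 *		prev_vspp = &vsp->vs_knext;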
 168  *
 169  * 2.3 Freelist management
 170  * -----------------------
 171  * We maintain power-of-2 freelists for free segments, i.e. free segments
 172  * of size >= 2^n reside in vmp->vm_freelist[n].  To ensure constant-time
 173  * allocation, vmem_xalloc() looks not in the first freelist that *might*
 174  * satisfy the allocation, but in the first freelist that *definitely*
 175  * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
 176  * freelists are empty).  For example, a 1000-byte allocation will be
 177  * satisfied not from the 512..1023-byte freelist, whose members *might*
  178  * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
 179  * the first member of which will *definitely* satisfy the allocation.
 180  * This ensures that vmem_xalloc() works in constant time.
 181  *
 182  * We maintain a bit map to determine quickly which freelists are non-empty.
 183  * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
 184  *
 185  * The different freelists are linked together into one large freelist,
 186  * with the freelist heads serving as markers.  Freelist markers simplify
 187  * the maintenance of vm_freemap by making it easy to tell when we're taking
 188  * the last member of a freelist (both of its neighbors will be markers).
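 *
 * A minimal sketch of instant-fit freelist selection (it mirrors the logic
 * in vmem_alloc() below):
 *
 *	if (ISP2(size))
 *		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
 *	else if ((hb = highbit(size)) < VMEM_FREELISTS)
 *		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
 *
 * where a flist of 0 means no freelist can definitely satisfy the request.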
 189  *
 190  * 2.4 Vmem Locking
 191  * ----------------
 192  * For simplicity, all arena state is protected by a per-arena lock.
 193  * For very hot arenas, use quantum caching for scalability.
 194  *
 195  * 2.5 Vmem Population
 196  * -------------------
 197  * Any internal vmem routine that might need to allocate new segment
 198  * structures must prepare in advance by calling vmem_populate(), which
  199  * will preallocate enough vmem_seg_t's to get it through the entire
 200  * operation without dropping the arena lock.
 201  *
 202  * 2.6 Auditing
 203  * ------------
 204  * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
 205  * Since virtual addresses cannot be scribbled on, there is no equivalent
 206  * in vmem to redzone checking, deadbeef, or other kmem debugging features.
 207  * Moreover, we do not audit frees because segment coalescing destroys the
 208  * association between an address and its segment structure.  Auditing is
 209  * thus intended primarily to keep track of who's consuming the arena.
 210  * Debugging support could certainly be extended in the future if it proves
 211  * necessary, but we do so much live checking via the allocation hash table
 212  * that even non-DEBUG systems get quite a bit of sanity checking already.
 213  */
 214 
 215 #include <sys/vmem_impl.h>
 216 #include <sys/kmem.h>
 217 #include <sys/kstat.h>
 218 #include <sys/param.h>
 219 #include <sys/systm.h>
 220 #include <sys/atomic.h>
 221 #include <sys/bitmap.h>
 222 #include <sys/sysmacros.h>
 223 #include <sys/cmn_err.h>
 224 #include <sys/debug.h>
 225 #include <sys/panic.h>
 226 
 227 #define VMEM_INITIAL            10      /* early vmem arenas */
 228 #define VMEM_SEG_INITIAL        200     /* early segments */
 229 
 230 /*
 231  * Adding a new span to an arena requires two segment structures: one to
 232  * represent the span, and one to represent the free segment it contains.
 233  */
 234 #define VMEM_SEGS_PER_SPAN_CREATE       2
 235 
 236 /*
 237  * Allocating a piece of an existing segment requires 0-2 segment structures
 238  * depending on how much of the segment we're allocating.
 239  *
 240  * To allocate the entire segment, no new segment structures are needed; we
 241  * simply move the existing segment structure from the freelist to the
 242  * allocation hash table.
 243  *
 244  * To allocate a piece from the left or right end of the segment, we must
 245  * split the segment into two pieces (allocated part and remainder), so we
 246  * need one new segment structure to represent the remainder.
 247  *
  248  * To allocate from the middle of a segment, we need two new segment structures
 249  * to represent the remainders on either side of the allocated part.
 250  */
 251 #define VMEM_SEGS_PER_EXACT_ALLOC       0
 252 #define VMEM_SEGS_PER_LEFT_ALLOC        1
 253 #define VMEM_SEGS_PER_RIGHT_ALLOC       1
 254 #define VMEM_SEGS_PER_MIDDLE_ALLOC      2
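
/*
 * For example, allocating [b, c) from the middle of a free segment [a, d)
 * reuses the existing segment structure for [b, c) and needs two new
 * structures for the remainders [a, b) and [c, d).
 */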
 255 
 256 /*
 257  * vmem_populate() preallocates segment structures for vmem to do its work.
 258  * It must preallocate enough for the worst case, which is when we must import
 259  * a new span and then allocate from the middle of it.
 260  */
 261 #define VMEM_SEGS_PER_ALLOC_MAX         \
 262         (VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
 263 
 264 /*
 265  * The segment structures themselves are allocated from vmem_seg_arena, so
 266  * we have a recursion problem when vmem_seg_arena needs to populate itself.
 267  * We address this by working out the maximum number of segment structures
 268  * this act will require, and multiplying by the maximum number of threads
 269  * that we'll allow to do it simultaneously.
 270  *
 271  * The worst-case segment consumption to populate vmem_seg_arena is as
 272  * follows (depicted as a stack trace to indicate why events are occurring):
 273  *
 274  * (In order to lower the fragmentation in the heap_arena, we specify a
 275  * minimum import size for the vmem_metadata_arena which is the same size
 276  * as the kmem_va quantum cache allocations.  This causes the worst-case
 277  * allocation from the vmem_metadata_arena to be 3 segments.)
 278  *
 279  * vmem_alloc(vmem_seg_arena)           -> 2 segs (span create + exact alloc)
 280  *  segkmem_alloc(vmem_metadata_arena)
 281  *   vmem_alloc(vmem_metadata_arena)    -> 3 segs (span create + left alloc)
 282  *    vmem_alloc(heap_arena)            -> 1 seg (left alloc)
 283  *   page_create()
 284  *   hat_memload()
 285  *    kmem_cache_alloc()
 286  *     kmem_slab_create()
 287  *      vmem_alloc(hat_memload_arena)   -> 2 segs (span create + exact alloc)
 288  *       segkmem_alloc(heap_arena)
 289  *        vmem_alloc(heap_arena)        -> 1 seg (left alloc)
 290  *        page_create()
 291  *        hat_memload()         -> (hat layer won't recurse further)
 292  *
 293  * The worst-case consumption for each arena is 3 segment structures.
 294  * Of course, a 3-seg reserve could easily be blown by multiple threads.
 295  * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
 296  * because they're rare).  We cannot allow a non-blocking allocation to get
 297  * tied up behind a blocking allocation, however, so we use separate locks
 298  * for VM_SLEEP and VM_NOSLEEP allocations.  Similarly, VM_PUSHPAGE allocations
 299  * must not block behind ordinary VM_SLEEPs.  In addition, if the system is
 300  * panicking then we must keep enough resources for panic_thread to do its
 301  * work.  Thus we have at most four threads trying to allocate from
 302  * vmem_seg_arena, and each thread consumes at most three segment structures,
 303  * so we must maintain a 12-seg reserve.
 304  */
 305 #define VMEM_POPULATE_RESERVE   12
 306 
 307 /*
 308  * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
 309  * so that it can satisfy the worst-case allocation *and* participate in
 310  * worst-case allocation from vmem_seg_arena.
 311  */
 312 #define VMEM_MINFREE    (VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
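
/*
 * With the constants above, VMEM_MINFREE works out to 12 + (2 + 2) = 16
 * preallocated segment structures per arena.
 */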
 313 
 314 static vmem_t vmem0[VMEM_INITIAL];
 315 static vmem_t *vmem_populator[VMEM_INITIAL];
 316 static uint32_t vmem_id;
 317 static uint32_t vmem_populators;
 318 static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
 319 static vmem_seg_t *vmem_segfree;
 320 static kmutex_t vmem_list_lock;
 321 static kmutex_t vmem_segfree_lock;
 322 static kmutex_t vmem_sleep_lock;
 323 static kmutex_t vmem_nosleep_lock;
 324 static kmutex_t vmem_pushpage_lock;
 325 static kmutex_t vmem_panic_lock;
 326 static vmem_t *vmem_list;
 327 static vmem_t *vmem_metadata_arena;
 328 static vmem_t *vmem_seg_arena;
 329 static vmem_t *vmem_hash_arena;
 330 static vmem_t *vmem_vmem_arena;
 331 static long vmem_update_interval = 15;  /* vmem_update() every 15 seconds */
 332 uint32_t vmem_mtbf;             /* mean time between failures [default: off] */
 333 size_t vmem_seg_size = sizeof (vmem_seg_t);
 334 
 335 static vmem_kstat_t vmem_kstat_template = {
 336         { "mem_inuse",          KSTAT_DATA_UINT64 },
 337         { "mem_import",         KSTAT_DATA_UINT64 },
 338         { "mem_total",          KSTAT_DATA_UINT64 },
 339         { "vmem_source",        KSTAT_DATA_UINT32 },
 340         { "alloc",              KSTAT_DATA_UINT64 },
 341         { "free",               KSTAT_DATA_UINT64 },
 342         { "wait",               KSTAT_DATA_UINT64 },
 343         { "fail",               KSTAT_DATA_UINT64 },
 344         { "lookup",             KSTAT_DATA_UINT64 },
 345         { "search",             KSTAT_DATA_UINT64 },
 346         { "populate_wait",      KSTAT_DATA_UINT64 },
 347         { "populate_fail",      KSTAT_DATA_UINT64 },
 348         { "contains",           KSTAT_DATA_UINT64 },
 349         { "contains_search",    KSTAT_DATA_UINT64 },
 350 };
 351 
 352 /*
 353  * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
 354  */
 355 #define VMEM_INSERT(vprev, vsp, type)                                   \
 356 {                                                                       \
 357         vmem_seg_t *vnext = (vprev)->vs_##type##next;                        \
 358         (vsp)->vs_##type##next = (vnext);                            \
 359         (vsp)->vs_##type##prev = (vprev);                            \
 360         (vprev)->vs_##type##next = (vsp);                            \
 361         (vnext)->vs_##type##prev = (vsp);                            \
 362 }
 363 
 364 #define VMEM_DELETE(vsp, type)                                          \
 365 {                                                                       \
 366         vmem_seg_t *vprev = (vsp)->vs_##type##prev;                  \
 367         vmem_seg_t *vnext = (vsp)->vs_##type##next;                  \
 368         (vprev)->vs_##type##next = (vnext);                          \
 369         (vnext)->vs_##type##prev = (vprev);                          \
 370 }
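
/*
 * For example, vmem_span_create() links a new span marker into the list of
 * span markers with VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k), and
 * vmem_span_destroy() unlinks it with VMEM_DELETE(span, k).
 */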
 371 
 372 /*
 373  * Get a vmem_seg_t from the global segfree list.
 374  */
 375 static vmem_seg_t *
 376 vmem_getseg_global(void)
 377 {
 378         vmem_seg_t *vsp;
 379 
 380         mutex_enter(&vmem_segfree_lock);
 381         if ((vsp = vmem_segfree) != NULL)
 382                 vmem_segfree = vsp->vs_knext;
 383         mutex_exit(&vmem_segfree_lock);
 384 
 385         return (vsp);
 386 }
 387 
 388 /*
 389  * Put a vmem_seg_t on the global segfree list.
 390  */
 391 static void
 392 vmem_putseg_global(vmem_seg_t *vsp)
 393 {
 394         mutex_enter(&vmem_segfree_lock);
 395         vsp->vs_knext = vmem_segfree;
 396         vmem_segfree = vsp;
 397         mutex_exit(&vmem_segfree_lock);
 398 }
 399 
 400 /*
 401  * Get a vmem_seg_t from vmp's segfree list.
 402  */
 403 static vmem_seg_t *
 404 vmem_getseg(vmem_t *vmp)
 405 {
 406         vmem_seg_t *vsp;
 407 
 408         ASSERT(vmp->vm_nsegfree > 0);
 409 
 410         vsp = vmp->vm_segfree;
 411         vmp->vm_segfree = vsp->vs_knext;
 412         vmp->vm_nsegfree--;
 413 
 414         return (vsp);
 415 }
 416 
 417 /*
 418  * Put a vmem_seg_t on vmp's segfree list.
 419  */
 420 static void
 421 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
 422 {
 423         vsp->vs_knext = vmp->vm_segfree;
 424         vmp->vm_segfree = vsp;
 425         vmp->vm_nsegfree++;
 426 }
 427 
 428 /*
 429  * Add vsp to the appropriate freelist.
 430  */
 431 static void
 432 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
 433 {
 434         vmem_seg_t *vprev;
 435 
 436         ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
 437 
 438         vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
 439         vsp->vs_type = VMEM_FREE;
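	/*
	 * Each freelist head is set up so that VS_SIZE(head) equals its
	 * list's power-of-2 size, so this sets exactly the vm_freemap bit
	 * for the freelist chosen above.
	 */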
 440         vmp->vm_freemap |= VS_SIZE(vprev);
 441         VMEM_INSERT(vprev, vsp, k);
 442 
 443         cv_broadcast(&vmp->vm_cv);
 444 }
 445 
 446 /*
 447  * Take vsp from the freelist.
 448  */
 449 static void
 450 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
 451 {
 452         ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
 453         ASSERT(vsp->vs_type == VMEM_FREE);
 454 
 455         if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
 456                 /*
 457                  * The segments on both sides of 'vsp' are freelist heads,
 458                  * so taking vsp leaves the freelist at vsp->vs_kprev empty.
 459                  */
 460                 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
 461                 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
 462         }
 463         VMEM_DELETE(vsp, k);
 464 }
 465 
 466 /*
 467  * Add vsp to the allocated-segment hash table and update kstats.
 468  */
 469 static void
 470 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
 471 {
 472         vmem_seg_t **bucket;
 473 
 474         vsp->vs_type = VMEM_ALLOC;
 475         bucket = VMEM_HASH(vmp, vsp->vs_start);
 476         vsp->vs_knext = *bucket;
 477         *bucket = vsp;
 478 
 479         if (vmem_seg_size == sizeof (vmem_seg_t)) {
 480                 vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
 481                     VMEM_STACK_DEPTH);
 482                 vsp->vs_thread = curthread;
 483                 vsp->vs_timestamp = gethrtime();
 484         } else {
 485                 vsp->vs_depth = 0;
 486         }
 487 
 488         vmp->vm_kstat.vk_alloc.value.ui64++;
 489         vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
 490 }
 491 
 492 /*
 493  * Remove vsp from the allocated-segment hash table and update kstats.
 494  */
 495 static vmem_seg_t *
 496 vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
 497 {
 498         vmem_seg_t *vsp, **prev_vspp;
 499 
 500         prev_vspp = VMEM_HASH(vmp, addr);
 501         while ((vsp = *prev_vspp) != NULL) {
 502                 if (vsp->vs_start == addr) {
 503                         *prev_vspp = vsp->vs_knext;
 504                         break;
 505                 }
 506                 vmp->vm_kstat.vk_lookup.value.ui64++;
 507                 prev_vspp = &vsp->vs_knext;
 508         }
 509 
 510         if (vsp == NULL)
 511                 panic("vmem_hash_delete(%p, %lx, %lu): bad free",
 512                     (void *)vmp, addr, size);
 513         if (VS_SIZE(vsp) != size)
 514                 panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
 515                     (void *)vmp, addr, size, VS_SIZE(vsp));
 516 
 517         vmp->vm_kstat.vk_free.value.ui64++;
 518         vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;
 519 
 520         return (vsp);
 521 }
 522 
 523 /*
 524  * Create a segment spanning the range [start, end) and add it to the arena.
 525  */
 526 static vmem_seg_t *
 527 vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
 528 {
 529         vmem_seg_t *newseg = vmem_getseg(vmp);
 530 
 531         newseg->vs_start = start;
 532         newseg->vs_end = end;
 533         newseg->vs_type = 0;
 534         newseg->vs_import = 0;
 535 
 536         VMEM_INSERT(vprev, newseg, a);
 537 
 538         return (newseg);
 539 }
 540 
 541 /*
 542  * Remove segment vsp from the arena.
 543  */
 544 static void
 545 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
 546 {
 547         ASSERT(vsp->vs_type != VMEM_ROTOR);
 548         VMEM_DELETE(vsp, a);
 549 
 550         vmem_putseg(vmp, vsp);
 551 }
 552 
 553 /*
 554  * Add the span [vaddr, vaddr + size) to vmp and update kstats.
 555  */
 556 static vmem_seg_t *
 557 vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
 558 {
 559         vmem_seg_t *newseg, *span;
 560         uintptr_t start = (uintptr_t)vaddr;
 561         uintptr_t end = start + size;
 562 
 563         ASSERT(MUTEX_HELD(&vmp->vm_lock));
 564 
 565         if ((start | end) & (vmp->vm_quantum - 1))
 566                 panic("vmem_span_create(%p, %p, %lu): misaligned",
 567                     (void *)vmp, vaddr, size);
 568 
 569         span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
 570         span->vs_type = VMEM_SPAN;
 571         span->vs_import = import;
 572         VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);
 573 
 574         newseg = vmem_seg_create(vmp, span, start, end);
 575         vmem_freelist_insert(vmp, newseg);
 576 
 577         if (import)
 578                 vmp->vm_kstat.vk_mem_import.value.ui64 += size;
 579         vmp->vm_kstat.vk_mem_total.value.ui64 += size;
 580 
 581         return (newseg);
 582 }
 583 
 584 /*
 585  * Remove span vsp from vmp and update kstats.
 586  */
 587 static void
 588 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
 589 {
 590         vmem_seg_t *span = vsp->vs_aprev;
 591         size_t size = VS_SIZE(vsp);
 592 
 593         ASSERT(MUTEX_HELD(&vmp->vm_lock));
 594         ASSERT(span->vs_type == VMEM_SPAN);
 595 
 596         if (span->vs_import)
 597                 vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
 598         vmp->vm_kstat.vk_mem_total.value.ui64 -= size;
 599 
 600         VMEM_DELETE(span, k);
 601 
 602         vmem_seg_destroy(vmp, vsp);
 603         vmem_seg_destroy(vmp, span);
 604 }
 605 
 606 /*
 607  * Allocate the subrange [addr, addr + size) from segment vsp.
 608  * If there are leftovers on either side, place them on the freelist.
 609  * Returns a pointer to the segment representing [addr, addr + size).
 610  */
 611 static vmem_seg_t *
 612 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
 613 {
 614         uintptr_t vs_start = vsp->vs_start;
 615         uintptr_t vs_end = vsp->vs_end;
 616         size_t vs_size = vs_end - vs_start;
 617         size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
 618         uintptr_t addr_end = addr + realsize;
 619 
 620         ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
 621         ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
 622         ASSERT(vsp->vs_type == VMEM_FREE);
 623         ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
 624         ASSERT(addr - 1 <= addr_end - 1);
 625 
 626         /*
 627          * If we're allocating from the start of the segment, and the
 628          * remainder will be on the same freelist, we can save quite
 629          * a bit of work.
 630          */
 631         if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
 632                 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
 633                 vsp->vs_start = addr_end;
 634                 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
 635                 vmem_hash_insert(vmp, vsp);
 636                 return (vsp);
 637         }
 638 
 639         vmem_freelist_delete(vmp, vsp);
 640 
 641         if (vs_end != addr_end)
 642                 vmem_freelist_insert(vmp,
 643                     vmem_seg_create(vmp, vsp, addr_end, vs_end));
 644 
 645         if (vs_start != addr)
 646                 vmem_freelist_insert(vmp,
 647                     vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
 648 
 649         vsp->vs_start = addr;
 650         vsp->vs_end = addr + size;
 651 
 652         vmem_hash_insert(vmp, vsp);
 653         return (vsp);
 654 }
 655 
 656 /*
  657  * Returns 1 if the current thread is populating an arena, 0 otherwise.
  658  * Callers use this to avoid recursion from the HAT layer.
 659  */
 660 int
 661 vmem_is_populator()
 662 {
 663         return (mutex_owner(&vmem_sleep_lock) == curthread ||
 664             mutex_owner(&vmem_nosleep_lock) == curthread ||
 665             mutex_owner(&vmem_pushpage_lock) == curthread ||
 666             mutex_owner(&vmem_panic_lock) == curthread);
 667 }
 668 
 669 /*
 670  * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
 671  */
 672 static int
 673 vmem_populate(vmem_t *vmp, int vmflag)
 674 {
 675         char *p;
 676         vmem_seg_t *vsp;
 677         ssize_t nseg;
 678         size_t size;
 679         kmutex_t *lp;
 680         int i;
 681 
 682         while (vmp->vm_nsegfree < VMEM_MINFREE &&
 683             (vsp = vmem_getseg_global()) != NULL)
 684                 vmem_putseg(vmp, vsp);
 685 
 686         if (vmp->vm_nsegfree >= VMEM_MINFREE)
 687                 return (1);
 688 
 689         /*
 690          * If we're already populating, tap the reserve.
 691          */
 692         if (vmem_is_populator()) {
 693                 ASSERT(vmp->vm_cflags & VMC_POPULATOR);
 694                 return (1);
 695         }
 696 
 697         mutex_exit(&vmp->vm_lock);
 698 
 699         if (panic_thread == curthread)
 700                 lp = &vmem_panic_lock;
 701         else if (vmflag & VM_NOSLEEP)
 702                 lp = &vmem_nosleep_lock;
 703         else if (vmflag & VM_PUSHPAGE)
 704                 lp = &vmem_pushpage_lock;
 705         else
 706                 lp = &vmem_sleep_lock;
 707 
 708         mutex_enter(lp);
 709 
 710         nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
 711         size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
 712         nseg = size / vmem_seg_size;
 713 
 714         /*
 715          * The following vmem_alloc() may need to populate vmem_seg_arena
 716          * and all the things it imports from.  When doing so, it will tap
 717          * each arena's reserve to prevent recursion (see the block comment
 718          * above the definition of VMEM_POPULATE_RESERVE).
 719          */
 720         p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
 721         if (p == NULL) {
 722                 mutex_exit(lp);
 723                 mutex_enter(&vmp->vm_lock);
 724                 vmp->vm_kstat.vk_populate_fail.value.ui64++;
 725                 return (0);
 726         }
 727 
 728         /*
 729          * Restock the arenas that may have been depleted during population.
 730          */
 731         for (i = 0; i < vmem_populators; i++) {
 732                 mutex_enter(&vmem_populator[i]->vm_lock);
 733                 while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
 734                         vmem_putseg(vmem_populator[i],
 735                             (vmem_seg_t *)(p + --nseg * vmem_seg_size));
 736                 mutex_exit(&vmem_populator[i]->vm_lock);
 737         }
 738 
 739         mutex_exit(lp);
 740         mutex_enter(&vmp->vm_lock);
 741 
 742         /*
 743          * Now take our own segments.
 744          */
 745         ASSERT(nseg >= VMEM_MINFREE);
 746         while (vmp->vm_nsegfree < VMEM_MINFREE)
 747                 vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
 748 
 749         /*
 750          * Give the remainder to charity.
 751          */
 752         while (nseg > 0)
 753                 vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));
 754 
 755         return (1);
 756 }
 757 
 758 /*
 759  * Advance a walker from its previous position to 'afterme'.
 760  * Note: may drop and reacquire vmp->vm_lock.
 761  */
 762 static void
 763 vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
 764 {
 765         vmem_seg_t *vprev = walker->vs_aprev;
 766         vmem_seg_t *vnext = walker->vs_anext;
 767         vmem_seg_t *vsp = NULL;
 768 
 769         VMEM_DELETE(walker, a);
 770 
 771         if (afterme != NULL)
 772                 VMEM_INSERT(afterme, walker, a);
 773 
 774         /*
 775          * The walker segment's presence may have prevented its neighbors
 776          * from coalescing.  If so, coalesce them now.
 777          */
 778         if (vprev->vs_type == VMEM_FREE) {
 779                 if (vnext->vs_type == VMEM_FREE) {
 780                         ASSERT(vprev->vs_end == vnext->vs_start);
 781                         vmem_freelist_delete(vmp, vnext);
 782                         vmem_freelist_delete(vmp, vprev);
 783                         vprev->vs_end = vnext->vs_end;
 784                         vmem_freelist_insert(vmp, vprev);
 785                         vmem_seg_destroy(vmp, vnext);
 786                 }
 787                 vsp = vprev;
 788         } else if (vnext->vs_type == VMEM_FREE) {
 789                 vsp = vnext;
 790         }
 791 
 792         /*
 793          * vsp could represent a complete imported span,
 794          * in which case we must return it to the source.
 795          */
 796         if (vsp != NULL && vsp->vs_aprev->vs_import &&
 797             vmp->vm_source_free != NULL &&
 798             vsp->vs_aprev->vs_type == VMEM_SPAN &&
 799             vsp->vs_anext->vs_type == VMEM_SPAN) {
 800                 void *vaddr = (void *)vsp->vs_start;
 801                 size_t size = VS_SIZE(vsp);
 802                 ASSERT(size == VS_SIZE(vsp->vs_aprev));
 803                 vmem_freelist_delete(vmp, vsp);
 804                 vmem_span_destroy(vmp, vsp);
 805                 mutex_exit(&vmp->vm_lock);
 806                 vmp->vm_source_free(vmp->vm_source, vaddr, size);
 807                 mutex_enter(&vmp->vm_lock);
 808         }
 809 }
 810 
 811 /*
 812  * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
 813  * in an arena, so that we avoid reusing addresses for as long as possible.
  814  * This helps to catch use-after-free bugs.  It's also the perfect policy
 815  * for allocating things like process IDs, where we want to cycle through
 816  * all values in order.
 817  */
 818 static void *
 819 vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
 820 {
 821         vmem_seg_t *vsp, *rotor;
 822         uintptr_t addr;
 823         size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
 824         size_t vs_size;
 825 
 826         mutex_enter(&vmp->vm_lock);
 827 
 828         if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
 829                 mutex_exit(&vmp->vm_lock);
 830                 return (NULL);
 831         }
 832 
 833         /*
 834          * The common case is that the segment right after the rotor is free,
 835          * and large enough that extracting 'size' bytes won't change which
 836          * freelist it's on.  In this case we can avoid a *lot* of work.
 837          * Instead of the normal vmem_seg_alloc(), we just advance the start
 838          * address of the victim segment.  Instead of moving the rotor, we
 839          * create the new segment structure *behind the rotor*, which has
 840          * the same effect.  And finally, we know we don't have to coalesce
 841          * the rotor's neighbors because the new segment lies between them.
 842          */
 843         rotor = &vmp->vm_rotor;
 844         vsp = rotor->vs_anext;
 845         if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
 846             P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
 847                 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
 848                 addr = vsp->vs_start;
 849                 vsp->vs_start = addr + realsize;
 850                 vmem_hash_insert(vmp,
 851                     vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
 852                 mutex_exit(&vmp->vm_lock);
 853                 return ((void *)addr);
 854         }
 855 
 856         /*
 857          * Starting at the rotor, look for a segment large enough to
 858          * satisfy the allocation.
 859          */
 860         for (;;) {
 861                 vmp->vm_kstat.vk_search.value.ui64++;
 862                 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
 863                         break;
 864                 vsp = vsp->vs_anext;
 865                 if (vsp == rotor) {
 866                         /*
  867                          * We've come full circle.  One possibility is that
  868                          * there's actually enough space, but the rotor itself
 869                          * is preventing the allocation from succeeding because
 870                          * it's sitting between two free segments.  Therefore,
 871                          * we advance the rotor and see if that liberates a
 872                          * suitable segment.
 873                          */
 874                         vmem_advance(vmp, rotor, rotor->vs_anext);
 875                         vsp = rotor->vs_aprev;
 876                         if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
 877                                 break;
 878                         /*
 879                          * If there's a lower arena we can import from, or it's
 880                          * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
 881                          * Otherwise, wait until another thread frees something.
 882                          */
 883                         if (vmp->vm_source_alloc != NULL ||
 884                             (vmflag & VM_NOSLEEP)) {
 885                                 mutex_exit(&vmp->vm_lock);
 886                                 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
 887                                     0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
 888                         }
 889                         vmp->vm_kstat.vk_wait.value.ui64++;
 890                         cv_wait(&vmp->vm_cv, &vmp->vm_lock);
 891                         vsp = rotor->vs_anext;
 892                 }
 893         }
 894 
 895         /*
 896          * We found a segment.  Extract enough space to satisfy the allocation.
 897          */
 898         addr = vsp->vs_start;
 899         vsp = vmem_seg_alloc(vmp, vsp, addr, size);
 900         ASSERT(vsp->vs_type == VMEM_ALLOC &&
 901             vsp->vs_start == addr && vsp->vs_end == addr + size);
 902 
 903         /*
 904          * Advance the rotor to right after the newly-allocated segment.
 905          * That's where the next VM_NEXTFIT allocation will begin searching.
 906          */
 907         vmem_advance(vmp, rotor, vsp);
 908         mutex_exit(&vmp->vm_lock);
 909         return ((void *)addr);
 910 }
 911 
 912 /*
 913  * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
 914  * freelist.  If size is not a power-of-2, it can return a false-negative.
 915  *
 916  * Used to decide if a newly imported span is superfluous after re-acquiring
 917  * the arena lock.
 918  */
 919 static int
 920 vmem_canalloc(vmem_t *vmp, size_t size)
 921 {
 922         int hb;
 923         int flist = 0;
 924         ASSERT(MUTEX_HELD(&vmp->vm_lock));
 925 
 926         if (ISP2(size))
 927                 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
 928         else if ((hb = highbit(size)) < VMEM_FREELISTS)
 929                 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
 930 
 931         return (flist);
 932 }
 933 
 934 /*
 935  * Allocate size bytes at offset phase from an align boundary such that the
 936  * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
 937  * that does not straddle a nocross-aligned boundary.
 938  */
 939 void *
 940 vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
 941     size_t nocross, void *minaddr, void *maxaddr, int vmflag)
 942 {
 943         vmem_seg_t *vsp;
 944         vmem_seg_t *vbest = NULL;
 945         uintptr_t addr, taddr, start, end;
 946         uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
 947         void *vaddr, *xvaddr = NULL;
 948         size_t xsize;
 949         int hb, flist, resv;
 950         uint32_t mtbf;
 951 
 952         if ((align | phase | nocross) & (vmp->vm_quantum - 1))
 953                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
 954                     "parameters not vm_quantum aligned",
 955                     (void *)vmp, size, align_arg, phase, nocross,
 956                     minaddr, maxaddr, vmflag);
 957 
 958         if (nocross != 0 &&
 959             (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
 960                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
 961                     "overconstrained allocation",
 962                     (void *)vmp, size, align_arg, phase, nocross,
 963                     minaddr, maxaddr, vmflag);
 964 
 965         if (phase >= align || !ISP2(align) || !ISP2(nocross))
 966                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
 967                     "parameters inconsistent or invalid",
 968                     (void *)vmp, size, align_arg, phase, nocross,
 969                     minaddr, maxaddr, vmflag);
 970 
 971         if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
 972             (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
 973                 return (NULL);
 974 
 975         mutex_enter(&vmp->vm_lock);
 976         for (;;) {
 977                 if (vmp->vm_nsegfree < VMEM_MINFREE &&
 978                     !vmem_populate(vmp, vmflag))
 979                         break;
 980 do_alloc:
 981                 /*
 982                  * highbit() returns the highest bit + 1, which is exactly
 983                  * what we want: we want to search the first freelist whose
 984                  * members are *definitely* large enough to satisfy our
 985                  * allocation.  However, there are certain cases in which we
 986                  * want to look at the next-smallest freelist (which *might*
 987                  * be able to satisfy the allocation):
 988                  *
 989                  * (1)  The size is exactly a power of 2, in which case
 990                  *      the smaller freelist is always big enough;
 991                  *
 992                  * (2)  All other freelists are empty;
 993                  *
 994                  * (3)  We're in the highest possible freelist, which is
 995                  *      always empty (e.g. the 4GB freelist on 32-bit systems);
 996                  *
 997                  * (4)  We're doing a best-fit or first-fit allocation.
 998                  */
 999                 if (ISP2(size)) {
1000                         flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1001                 } else {
1002                         hb = highbit(size);
1003                         if ((vmp->vm_freemap >> hb) == 0 ||
1004                             hb == VMEM_FREELISTS ||
1005                             (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
1006                                 hb--;
1007                         flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1008                 }
1009 
1010                 for (vbest = NULL, vsp = (flist == 0) ? NULL :
1011                     vmp->vm_freelist[flist - 1].vs_knext;
1012                     vsp != NULL; vsp = vsp->vs_knext) {
1013                         vmp->vm_kstat.vk_search.value.ui64++;
1014                         if (vsp->vs_start == 0) {
1015                                 /*
1016                                  * We're moving up to a larger freelist,
1017                                  * so if we've already found a candidate,
1018                                  * the fit can't possibly get any better.
1019                                  */
1020                                 if (vbest != NULL)
1021                                         break;
1022                                 /*
1023                                  * Find the next non-empty freelist.
1024                                  */
1025                                 flist = lowbit(P2ALIGN(vmp->vm_freemap,
1026                                     VS_SIZE(vsp)));
1027                                 if (flist-- == 0)
1028                                         break;
1029                                 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
1030                                 ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
1031                                 continue;
1032                         }
1033                         if (vsp->vs_end - 1 < (uintptr_t)minaddr)
1034                                 continue;
1035                         if (vsp->vs_start > (uintptr_t)maxaddr - 1)
1036                                 continue;
1037                         start = MAX(vsp->vs_start, (uintptr_t)minaddr);
1038                         end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
1039                         taddr = P2PHASEUP(start, align, phase);
1040                         if (P2BOUNDARY(taddr, size, nocross))
1041                                 taddr +=
1042                                     P2ROUNDUP(P2NPHASE(taddr, nocross), align);
1043                         if ((taddr - start) + size > end - start ||
1044                             (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
1045                                 continue;
1046                         vbest = vsp;
1047                         addr = taddr;
1048                         if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
1049                                 break;
1050                 }
1051                 if (vbest != NULL)
1052                         break;
1053                 ASSERT(xvaddr == NULL);
1054                 if (size == 0)
1055                         panic("vmem_xalloc(): size == 0");
1056                 if (vmp->vm_source_alloc != NULL && nocross == 0 &&
1057                     minaddr == NULL && maxaddr == NULL) {
1058                         size_t aneeded, asize;
1059                         size_t aquantum = MAX(vmp->vm_quantum,
1060                             vmp->vm_source->vm_quantum);
1061                         size_t aphase = phase;
1062                         if ((align > aquantum) &&
1063                             !(vmp->vm_cflags & VMC_XALIGN)) {
1064                                 aphase = (P2PHASE(phase, aquantum) != 0) ?
1065                                     align - vmp->vm_quantum : align - aquantum;
1066                                 ASSERT(aphase >= phase);
1067                         }
1068                         aneeded = MAX(size + aphase, vmp->vm_min_import);
1069                         asize = P2ROUNDUP(aneeded, aquantum);
1070 
1071                         if (asize < size) {
1072                                 /*
1073                                  * The rounding induced overflow; return NULL
1074                                  * if we are permitted to fail the allocation
1075                                  * (and explicitly panic if we aren't).
1076                                  */
1077                                 if ((vmflag & VM_NOSLEEP) &&
1078                                     !(vmflag & VM_PANIC)) {
1079                                         mutex_exit(&vmp->vm_lock);
1080                                         return (NULL);
1081                                 }
1082 
1083                                 panic("vmem_xalloc(): size overflow");
1084                         }
1085 
1086                         /*
1087                          * Determine how many segment structures we'll consume.
1088                          * The calculation must be precise because if we're
1089                          * here on behalf of vmem_populate(), we are taking
1090                          * segments from a very limited reserve.
1091                          */
1092                         if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
1093                                 resv = VMEM_SEGS_PER_SPAN_CREATE +
1094                                     VMEM_SEGS_PER_EXACT_ALLOC;
1095                         else if (phase == 0 &&
1096                             align <= vmp->vm_source->vm_quantum)
1097                                 resv = VMEM_SEGS_PER_SPAN_CREATE +
1098                                     VMEM_SEGS_PER_LEFT_ALLOC;
1099                         else
1100                                 resv = VMEM_SEGS_PER_ALLOC_MAX;
1101 
1102                         ASSERT(vmp->vm_nsegfree >= resv);
1103                         vmp->vm_nsegfree -= resv;    /* reserve our segs */
1104                         mutex_exit(&vmp->vm_lock);
1105                         if (vmp->vm_cflags & VMC_XALLOC) {
1106                                 size_t oasize = asize;
1107                                 vaddr = ((vmem_ximport_t *)
1108                                     vmp->vm_source_alloc)(vmp->vm_source,
1109                                     &asize, align, vmflag & VM_KMFLAGS);
1110                                 ASSERT(asize >= oasize);
1111                                 ASSERT(P2PHASE(asize,
1112                                     vmp->vm_source->vm_quantum) == 0);
1113                                 ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
1114                                     IS_P2ALIGNED(vaddr, align));
1115                         } else {
1116                                 vaddr = vmp->vm_source_alloc(vmp->vm_source,
1117                                     asize, vmflag & VM_KMFLAGS);
1118                         }
1119                         mutex_enter(&vmp->vm_lock);
1120                         vmp->vm_nsegfree += resv;    /* claim reservation */
1121                         aneeded = size + align - vmp->vm_quantum;
1122                         aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
1123                         if (vaddr != NULL) {
1124                                 /*
1125                                  * Since we dropped the vmem lock while
1126                                  * calling the import function, other
1127                                  * threads could have imported space
1128                                  * and made our import unnecessary.  In
1129                                  * order to save space, we return
1130                                  * excess imports immediately.
1131                                  */
1132                                 if (asize > aneeded &&
1133                                     vmp->vm_source_free != NULL &&
1134                                     vmem_canalloc(vmp, aneeded)) {
1135                                         ASSERT(resv >=
1136                                             VMEM_SEGS_PER_MIDDLE_ALLOC);
1137                                         xvaddr = vaddr;
1138                                         xsize = asize;
1139                                         goto do_alloc;
1140                                 }
1141                                 vbest = vmem_span_create(vmp, vaddr, asize, 1);
1142                                 addr = P2PHASEUP(vbest->vs_start, align, phase);
1143                                 break;
1144                         } else if (vmem_canalloc(vmp, aneeded)) {
1145                                 /*
1146                                  * Our import failed, but another thread
1147                                  * added sufficient free memory to the arena
1148                                  * to satisfy our request.  Go back and
1149                                  * grab it.
1150                                  */
1151                                 ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
1152                                 goto do_alloc;
1153                         }
1154                 }
1155 
1156                 /*
1157                  * If the requestor chooses to fail the allocation attempt
 1158                  * rather than reap, wait, and retry, get out of the loop.
1159                  */
1160                 if (vmflag & VM_ABORT)
1161                         break;
1162                 mutex_exit(&vmp->vm_lock);
1163                 if (vmp->vm_cflags & VMC_IDENTIFIER)
1164                         kmem_reap_idspace();
1165                 else
1166                         kmem_reap();
1167                 mutex_enter(&vmp->vm_lock);
1168                 if (vmflag & VM_NOSLEEP)
1169                         break;
1170                 vmp->vm_kstat.vk_wait.value.ui64++;
1171                 cv_wait(&vmp->vm_cv, &vmp->vm_lock);
1172         }
1173         if (vbest != NULL) {
1174                 ASSERT(vbest->vs_type == VMEM_FREE);
1175                 ASSERT(vbest->vs_knext != vbest);
1176                 /* re-position to end of buffer */
1177                 if (vmflag & VM_ENDALLOC) {
1178                         addr += ((vbest->vs_end - (addr + size)) / align) *
1179                             align;
1180                 }
1181                 (void) vmem_seg_alloc(vmp, vbest, addr, size);
1182                 mutex_exit(&vmp->vm_lock);
1183                 if (xvaddr)
1184                         vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
1185                 ASSERT(P2PHASE(addr, align) == phase);
1186                 ASSERT(!P2BOUNDARY(addr, size, nocross));
1187                 ASSERT(addr >= (uintptr_t)minaddr);
1188                 ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
1189                 return ((void *)addr);
1190         }
1191         vmp->vm_kstat.vk_fail.value.ui64++;
1192         mutex_exit(&vmp->vm_lock);
1193         if (vmflag & VM_PANIC)
1194                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
1195                     "cannot satisfy mandatory allocation",
1196                     (void *)vmp, size, align_arg, phase, nocross,
1197                     minaddr, maxaddr, vmflag);
1198         ASSERT(xvaddr == NULL);
1199         return (NULL);
1200 }
1201 
1202 /*
1203  * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
1204  * allocation.  vmem_xalloc() and vmem_xfree() must always be paired because
1205  * both routines bypass the quantum caches.
1206  */
1207 void
1208 vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1209 {
1210         vmem_seg_t *vsp, *vnext, *vprev;
1211 
1212         mutex_enter(&vmp->vm_lock);
1213 
1214         vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1215         vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1216 
1217         /*
1218          * Attempt to coalesce with the next segment.
1219          */
1220         vnext = vsp->vs_anext;
1221         if (vnext->vs_type == VMEM_FREE) {
1222                 ASSERT(vsp->vs_end == vnext->vs_start);
1223                 vmem_freelist_delete(vmp, vnext);
1224                 vsp->vs_end = vnext->vs_end;
1225                 vmem_seg_destroy(vmp, vnext);
1226         }
1227 
1228         /*
1229          * Attempt to coalesce with the previous segment.
1230          */
1231         vprev = vsp->vs_aprev;
1232         if (vprev->vs_type == VMEM_FREE) {
1233                 ASSERT(vprev->vs_end == vsp->vs_start);
1234                 vmem_freelist_delete(vmp, vprev);
1235                 vprev->vs_end = vsp->vs_end;
1236                 vmem_seg_destroy(vmp, vsp);
1237                 vsp = vprev;
1238         }
1239 
1240         /*
1241          * If the entire span is free, return it to the source.
1242          */
1243         if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
1244             vsp->vs_aprev->vs_type == VMEM_SPAN &&
1245             vsp->vs_anext->vs_type == VMEM_SPAN) {
1246                 vaddr = (void *)vsp->vs_start;
1247                 size = VS_SIZE(vsp);
1248                 ASSERT(size == VS_SIZE(vsp->vs_aprev));
1249                 vmem_span_destroy(vmp, vsp);
1250                 mutex_exit(&vmp->vm_lock);
1251                 vmp->vm_source_free(vmp->vm_source, vaddr, size);
1252         } else {
1253                 vmem_freelist_insert(vmp, vsp);
1254                 mutex_exit(&vmp->vm_lock);
1255         }
1256 }
1257 
1258 /*
1259  * Allocate size bytes from arena vmp.  Returns the allocated address
1260  * on success, NULL on failure.  vmflag specifies VM_SLEEP or VM_NOSLEEP,
1261  * and may also specify best-fit, first-fit, or next-fit allocation policy
1262  * instead of the default instant-fit policy.  VM_SLEEP allocations are
1263  * guaranteed to succeed.
1264  */
1265 void *
1266 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1267 {
1268         vmem_seg_t *vsp;
1269         uintptr_t addr;
1270         int hb;
1271         int flist = 0;
1272         uint32_t mtbf;
1273 
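        /*
         * Sizes in (0, vm_qcache_max] are satisfied directly from the
         * per-size quantum caches; "size - 1" both excludes a zero size
         * (by unsigned wraparound) and rounds the size up to the nearest
         * quantum multiple when indexing vm_qcache[].
         */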
1274         if (size - 1 < vmp->vm_qcache_max)
1275                 return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1276                     vmp->vm_qshift], vmflag & VM_KMFLAGS));
1277 
1278         if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1279             (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
1280                 return (NULL);
1281 
1282         if (vmflag & VM_NEXTFIT)
1283                 return (vmem_nextfit_alloc(vmp, size, vmflag));
1284 
1285         if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
1286                 return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1287                     NULL, NULL, vmflag));
1288 
1289         /*
1290          * Unconstrained instant-fit allocation from the segment list.
1291          */
1292         mutex_enter(&vmp->vm_lock);
1293 
1294         if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1295                 if (ISP2(size))
1296                         flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1297                 else if ((hb = highbit(size)) < VMEM_FREELISTS)
1298                         flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1299         }
1300 
1301         if (flist-- == 0) {
1302                 mutex_exit(&vmp->vm_lock);
1303                 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1304                     0, 0, NULL, NULL, vmflag));
1305         }
1306 
1307         ASSERT(size <= (1UL << flist));
1308         vsp = vmp->vm_freelist[flist].vs_knext;
1309         addr = vsp->vs_start;
1310         if (vmflag & VM_ENDALLOC) {
1311                 addr += vsp->vs_end - (addr + size);
1312         }
1313         (void) vmem_seg_alloc(vmp, vsp, addr, size);
1314         mutex_exit(&vmp->vm_lock);
1315         return ((void *)addr);
1316 }
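
/*
 * A minimal usage sketch, assuming a hypothetical arena "foo_arena":
 * the default policy is instant fit; VM_BESTFIT selects best fit, and
 * VM_NOSLEEP callers must be prepared for a NULL return.
 *
 *	void *p = vmem_alloc(foo_arena, 8192, VM_SLEEP);
 *	void *q = vmem_alloc(foo_arena, 8192, VM_NOSLEEP | VM_BESTFIT);
 *	if (q == NULL) {
 *		... back off and retry, or fail the request ...
 *	}
 */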
1317 
1318 /*
1319  * Free the segment [vaddr, vaddr + size).
1320  */
1321 void
1322 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1323 {
1324         if (size - 1 < vmp->vm_qcache_max)
1325                 kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1326                     vaddr);
1327         else
1328                 vmem_xfree(vmp, vaddr, size);
1329 }
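
/*
 * A minimal usage sketch, assuming a hypothetical arena "foo_arena":
 * the size passed to vmem_free() must equal the size originally passed
 * to vmem_alloc(), since small allocations are returned to the per-size
 * quantum cache they were taken from.
 *
 *	void *p = vmem_alloc(foo_arena, 3 * PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(foo_arena, p, 3 * PAGESIZE);
 */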
1330 
1331 /*
1332  * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1333  */
1334 int
1335 vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1336 {
1337         uintptr_t start = (uintptr_t)vaddr;
1338         uintptr_t end = start + size;
1339         vmem_seg_t *vsp;
1340         vmem_seg_t *seg0 = &vmp->vm_seg0;
1341 
1342         mutex_enter(&vmp->vm_lock);
1343         vmp->vm_kstat.vk_contains.value.ui64++;
1344         for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1345                 vmp->vm_kstat.vk_contains_search.value.ui64++;
1346                 ASSERT(vsp->vs_type == VMEM_SPAN);
1347                 if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1348                         break;
1349         }
1350         mutex_exit(&vmp->vm_lock);
1351         return (vsp != seg0);
1352 }
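
/*
 * A minimal usage sketch, assuming a hypothetical arena "foo_arena":
 * vmem_contains() walks the arena's span list linearly, so it is best
 * suited to debugging checks rather than hot paths, e.g.
 *
 *	ASSERT(vmem_contains(foo_arena, vaddr, size));
 *	vmem_free(foo_arena, vaddr, size);
 */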
1353 
1354 /*
1355  * Add the span [vaddr, vaddr + size) to arena vmp.
1356  */
1357 void *
1358 vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1359 {
1360         if (vaddr == NULL || size == 0)
1361                 panic("vmem_add(%p, %p, %lu): bad arguments",
1362                     (void *)vmp, vaddr, size);
1363 
1364         ASSERT(!vmem_contains(vmp, vaddr, size));
1365 
1366         mutex_enter(&vmp->vm_lock);
1367         if (vmem_populate(vmp, vmflag))
1368                 (void) vmem_span_create(vmp, vaddr, size, 0);
1369         else
1370                 vaddr = NULL;
1371         mutex_exit(&vmp->vm_lock);
1372         return (vaddr);
1373 }
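
/*
 * A minimal usage sketch, assuming a hypothetical arena "foo_arena" and
 * hypothetical new_base/new_size values; the added span must not overlap
 * anything the arena already contains, and a NULL return means the span
 * could not be added.
 *
 *	if (vmem_add(foo_arena, (void *)new_base, new_size, VM_NOSLEEP) == NULL)
 *		... handle the failure ...
 */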
1374 
1375 /*
1376  * Walk the vmp arena, applying func to each segment matching typemask.
1377  * If VMEM_REENTRANT is specified, the arena lock is dropped across each
1378  * call to func(); otherwise, it is held for the duration of vmem_walk()
1379  * to ensure a consistent snapshot.  Note that a VMEM_REENTRANT walk
1380  * does not see a consistent view of the arena, so it should only be
1381  * used when an approximate (hint-quality) answer is adequate.
1382  */
1383 void
1384 vmem_walk(vmem_t *vmp, int typemask,
1385     void (*func)(void *, void *, size_t), void *arg)
1386 {
1387         vmem_seg_t *vsp;
1388         vmem_seg_t *seg0 = &vmp->vm_seg0;
1389         vmem_seg_t walker;
1390 
1391         if (typemask & VMEM_WALKER)
1392                 return;
1393 
1394         bzero(&walker, sizeof (walker));
1395         walker.vs_type = VMEM_WALKER;
1396 
1397         mutex_enter(&vmp->vm_lock);
1398         VMEM_INSERT(seg0, &walker, a);
1399         for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1400                 if (vsp->vs_type & typemask) {
1401                         void *start = (void *)vsp->vs_start;
1402                         size_t size = VS_SIZE(vsp);
1403                         if (typemask & VMEM_REENTRANT) {
1404                                 vmem_advance(vmp, &walker, vsp);
1405                                 mutex_exit(&vmp->vm_lock);
1406                                 func(arg, start, size);
1407                                 mutex_enter(&vmp->vm_lock);
1408                                 vsp = &walker;
1409                         } else {
1410                                 func(arg, start, size);
1411                         }
1412                 }
1413         }
1414         vmem_advance(vmp, &walker, NULL);
1415         mutex_exit(&vmp->vm_lock);
1416 }
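
/*
 * A minimal usage sketch, assuming a hypothetical arena "foo_arena" and a
 * hypothetical callback sum_free(); the callback receives the caller's
 * argument, the segment start, and the segment size.
 *
 *	static void
 *	sum_free(void *arg, void *start, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t free_bytes = 0;
 *	vmem_walk(foo_arena, VMEM_FREE, sum_free, &free_bytes);
 */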
1417 
1418 /*
1419  * Return the total amount of memory whose type matches typemask.  Thus:
1420  *
1421  *      typemask VMEM_ALLOC yields total memory allocated (in use).
1422  *      typemask VMEM_FREE yields total memory free (available).
1423  *      typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
1424  */
1425 size_t
1426 vmem_size(vmem_t *vmp, int typemask)
1427 {
1428         uint64_t size = 0;
1429 
1430         if (typemask & VMEM_ALLOC)
1431                 size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
1432         if (typemask & VMEM_FREE)
1433                 size += vmp->vm_kstat.vk_mem_total.value.ui64 -
1434                     vmp->vm_kstat.vk_mem_inuse.value.ui64;
1435         return ((size_t)size);
1436 }
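
/*
 * A minimal usage sketch, assuming a hypothetical arena "foo_arena":
 *
 *	size_t inuse = vmem_size(foo_arena, VMEM_ALLOC);
 *	size_t total = vmem_size(foo_arena, VMEM_ALLOC | VMEM_FREE);
 *	... inuse and total together give the arena's utilization ...
 */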
1437 
1438 /*
1439  * Create an arena called name whose initial span is [base, base + size).
1440  * The arena's natural unit of currency is quantum, so vmem_alloc()
1441  * guarantees quantum-aligned results.  The arena may import new spans
1442  * by invoking afunc() on source, and may return those spans by invoking
1443  * ffunc() on source.  To make small allocations fast and scalable,
1444  * the arena offers high-performance caching for each integer multiple
1445  * of quantum up to qcache_max.
1446  */
1447 static vmem_t *
1448 vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
1449     void *(*afunc)(vmem_t *, size_t, int),
1450     void (*ffunc)(vmem_t *, void *, size_t),
1451     vmem_t *source, size_t qcache_max, int vmflag)
1452 {
1453         int i;
1454         size_t nqcache;
1455         vmem_t *vmp, *cur, **vmpp;
1456         vmem_seg_t *vsp;
1457         vmem_freelist_t *vfp;
1458         uint32_t id = atomic_inc_32_nv(&vmem_id);
1459 
1460         if (vmem_vmem_arena != NULL) {
1461                 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1462                     vmflag & VM_KMFLAGS);
1463         } else {
1464                 ASSERT(id <= VMEM_INITIAL);
1465                 vmp = &vmem0[id - 1];
1466         }
1467 
1468         /* An identifier arena must inherit from another identifier arena */
1469         ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
1470             (vmflag & VMC_IDENTIFIER)));
1471 
1472         if (vmp == NULL)
1473                 return (NULL);
1474         bzero(vmp, sizeof (vmem_t));
1475 
1476         (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1477         mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
1478         cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
1479         vmp->vm_cflags = vmflag;
1480         vmflag &= VM_KMFLAGS;
1481 
1482         vmp->vm_quantum = quantum;
1483         vmp->vm_qshift = highbit(quantum) - 1;
1484         nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1485 
1486         for (i = 0; i <= VMEM_FREELISTS; i++) {
1487                 vfp = &vmp->vm_freelist[i];
1488                 vfp->vs_end = 1UL << i;
1489                 vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
1490                 vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
1491         }
1492 
1493         vmp->vm_freelist[0].vs_kprev = NULL;
1494         vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1495         vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1496         vmp->vm_hash_table = vmp->vm_hash0;
1497         vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1498         vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1499 
1500         vsp = &vmp->vm_seg0;
1501         vsp->vs_anext = vsp;
1502         vsp->vs_aprev = vsp;
1503         vsp->vs_knext = vsp;
1504         vsp->vs_kprev = vsp;
1505         vsp->vs_type = VMEM_SPAN;
1506 
1507         vsp = &vmp->vm_rotor;
1508         vsp->vs_type = VMEM_ROTOR;
1509         VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1510 
1511         bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));
1512 
1513         vmp->vm_id = id;
1514         if (source != NULL)
1515                 vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
1516         vmp->vm_source = source;
1517         vmp->vm_source_alloc = afunc;
1518         vmp->vm_source_free = ffunc;
1519 
1520         /*
1521          * Some arenas (like vmem_metadata and kmem_metadata) cannot
1522          * use quantum caching to lower fragmentation.  Instead, we
1523          * increase the size of their imports, which has a similar effect.
1524          */
1525         if (vmp->vm_cflags & VMC_NO_QCACHE) {
1526                 vmp->vm_min_import =
1527                     VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
1528                 nqcache = 0;
1529         }
1530 
1531         if (nqcache != 0) {
1532                 ASSERT(!(vmflag & VM_NOSLEEP));
1533                 vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1534                 for (i = 0; i < nqcache; i++) {
1535                         char buf[VMEM_NAMELEN + 21];
1536                         (void) sprintf(buf, "%s_%lu", vmp->vm_name,
1537                             (i + 1) * quantum);
1538                         vmp->vm_qcache[i] = kmem_cache_create(buf,
1539                             (i + 1) * quantum, quantum, NULL, NULL, NULL,
1540                             NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
1541                 }
1542         }
1543 
1544         if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
1545             "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
1546             sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
1547                 vmp->vm_ksp->ks_data = &vmp->vm_kstat;
1548                 kstat_install(vmp->vm_ksp);
1549         }
1550 
1551         mutex_enter(&vmem_list_lock);
1552         vmpp = &vmem_list;
1553         while ((cur = *vmpp) != NULL)
1554                 vmpp = &cur->vm_next;
1555         *vmpp = vmp;
1556         mutex_exit(&vmem_list_lock);
1557 
1558         if (vmp->vm_cflags & VMC_POPULATOR) {
1559                 ASSERT(vmem_populators < VMEM_INITIAL);
1560                 vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
1561                 mutex_enter(&vmp->vm_lock);
1562                 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1563                 mutex_exit(&vmp->vm_lock);
1564         }
1565 
1566         if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1567                 vmem_destroy(vmp);
1568                 return (NULL);
1569         }
1570 
1571         return (vmp);
1572 }
1573 
1574 vmem_t *
1575 vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
1576     vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1577     size_t qcache_max, int vmflag)
1578 {
1579         ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
1580         vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);
1581 
1582         return (vmem_create_common(name, base, size, quantum,
1583             (vmem_alloc_t *)afunc, ffunc, source, qcache_max,
1584             vmflag | VMC_XALLOC));
1585 }
1586 
1587 vmem_t *
1588 vmem_create(const char *name, void *base, size_t size, size_t quantum,
1589     vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1590     size_t qcache_max, int vmflag)
1591 {
1592         ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
1593         vmflag &= ~(VMC_XALLOC | VMC_XALIGN);
1594 
1595         return (vmem_create_common(name, base, size, quantum,
1596             afunc, ffunc, source, qcache_max, vmflag));
1597 }
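
/*
 * Two minimal creation sketches, with hypothetical names and sizes.  The
 * first arena starts empty and imports page-sized spans from an existing
 * source arena (heap_arena here) using vmem_alloc()/vmem_free() as its
 * import and release functions, with quantum caching up to 8 pages.  The
 * second is an identifier arena handing out the integers 1 through 99999.
 *
 *	vmem_t *foo_arena = vmem_create("foo", NULL, 0, PAGESIZE,
 *	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, VM_SLEEP);
 *
 *	vmem_t *foo_id_arena = vmem_create("foo_id", (void *)1, 99999, 1,
 *	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 */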
1598 
1599 /*
1600  * Destroy arena vmp.
1601  */
1602 void
1603 vmem_destroy(vmem_t *vmp)
1604 {
1605         vmem_t *cur, **vmpp;
1606         vmem_seg_t *seg0 = &vmp->vm_seg0;
1607         vmem_seg_t *vsp, *anext;
1608         size_t leaked;
1609         int i;
1610 
1611         mutex_enter(&vmem_list_lock);
1612         vmpp = &vmem_list;
1613         while ((cur = *vmpp) != vmp)
1614                 vmpp = &cur->vm_next;
1615         *vmpp = vmp->vm_next;
1616         mutex_exit(&vmem_list_lock);
1617 
1618         for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1619                 if (vmp->vm_qcache[i])
1620                         kmem_cache_destroy(vmp->vm_qcache[i]);
1621 
1622         leaked = vmem_size(vmp, VMEM_ALLOC);
1623         if (leaked != 0)
1624                 cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
1625                     vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
1626                     "identifiers" : "bytes");
1627 
1628         if (vmp->vm_hash_table != vmp->vm_hash0)
1629                 vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1630                     (vmp->vm_hash_mask + 1) * sizeof (void *));
1631 
1632         /*
1633          * Give back the segment structures for anything that's left in the
1634          * arena, e.g. the primary spans and their free segments.
1635          */
1636         VMEM_DELETE(&vmp->vm_rotor, a);
1637         for (vsp = seg0->vs_anext; vsp != seg0; vsp = anext) {
1638                 anext = vsp->vs_anext;
1639                 vmem_putseg_global(vsp);
1640         }
1641 
1642         while (vmp->vm_nsegfree > 0)
1643                 vmem_putseg_global(vmem_getseg(vmp));
1644 
1645         kstat_delete(vmp->vm_ksp);
1646 
1647         mutex_destroy(&vmp->vm_lock);
1648         cv_destroy(&vmp->vm_cv);
1649         vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1650 }
1651 
1652 /*
1653  * Only shrink vmem hashtable if it is 1<<vmem_rescale_minshift times (8x)
1654  * larger than necessary.
1655  */
1656 int vmem_rescale_minshift = 3;
1657 
1658 /*
1659  * Resize vmp's hash table to keep the average lookup depth near 1.0.
1660  */
1661 static void
1662 vmem_hash_rescale(vmem_t *vmp)
1663 {
1664         vmem_seg_t **old_table, **new_table, *vsp;
1665         size_t old_size, new_size, h, nseg;
1666 
1667         nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
1668             vmp->vm_kstat.vk_free.value.ui64);
1669 
1670         new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
1671         old_size = vmp->vm_hash_mask + 1;
1672 
1673         if ((old_size >> vmem_rescale_minshift) <= new_size &&
1674             new_size <= (old_size << 1))
1675                 return;
1676 
1677         new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
1678             VM_NOSLEEP);
1679         if (new_table == NULL)
1680                 return;
1681         bzero(new_table, new_size * sizeof (void *));
1682 
1683         mutex_enter(&vmp->vm_lock);
1684 
1685         old_size = vmp->vm_hash_mask + 1;
1686         old_table = vmp->vm_hash_table;
1687 
1688         vmp->vm_hash_mask = new_size - 1;
1689         vmp->vm_hash_table = new_table;
1690         vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1691 
1692         for (h = 0; h < old_size; h++) {
1693                 vsp = old_table[h];
1694                 while (vsp != NULL) {
1695                         uintptr_t addr = vsp->vs_start;
1696                         vmem_seg_t *next_vsp = vsp->vs_knext;
1697                         vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1698                         vsp->vs_knext = *hash_bucket;
1699                         *hash_bucket = vsp;
1700                         vsp = next_vsp;
1701                 }
1702         }
1703 
1704         mutex_exit(&vmp->vm_lock);
1705 
1706         if (old_table != vmp->vm_hash0)
1707                 vmem_free(vmem_hash_arena, old_table,
1708                     old_size * sizeof (void *));
1709 }
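
/*
 * A worked example of the sizing above: with nseg = 1000 live segments,
 * 3 * nseg + 4 = 3004 and highbit(3004) = 12, so new_size becomes
 * 1 << 10 = 1024 buckets, i.e. roughly one segment per bucket.  A table
 * of 256 buckets would then be grown to 1024 (1024 exceeds 256 << 1),
 * while an 8192-bucket table would be left alone: shrinking happens only
 * once the table exceeds new_size << vmem_rescale_minshift (here
 * 8 * 1024) buckets.
 */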
1710 
1711 /*
1712  * Perform periodic maintenance on all vmem arenas.
1713  */
1714 void
1715 vmem_update(void *dummy)
1716 {
1717         vmem_t *vmp;
1718 
1719         mutex_enter(&vmem_list_lock);
1720         for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1721                 /*
1722                  * If threads are waiting for resources, wake them up
1723                  * periodically so they can issue another kmem_reap()
1724                  * to reclaim resources cached by the slab allocator.
1725                  */
1726                 cv_broadcast(&vmp->vm_cv);
1727 
1728                 /*
1729                  * Rescale the hash table to keep the hash chains short.
1730                  */
1731                 vmem_hash_rescale(vmp);
1732         }
1733         mutex_exit(&vmem_list_lock);
1734 
1735         (void) timeout(vmem_update, dummy, vmem_update_interval * hz);
1736 }
1737 
1738 void
1739 vmem_qcache_reap(vmem_t *vmp)
1740 {
1741         int i;
1742 
1743         /*
1744          * Reap any quantum caches that may be part of this vmem.
1745          */
1746         for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1747                 if (vmp->vm_qcache[i])
1748                         kmem_cache_reap_now(vmp->vm_qcache[i]);
1749 }
1750 
1751 /*
1752  * Prepare vmem for use.
1753  */
1754 vmem_t *
1755 vmem_init(const char *heap_name,
1756     void *heap_start, size_t heap_size, size_t heap_quantum,
1757     void *(*heap_alloc)(vmem_t *, size_t, int),
1758     void (*heap_free)(vmem_t *, void *, size_t))
1759 {
1760         uint32_t id;
1761         int nseg = VMEM_SEG_INITIAL;
1762         vmem_t *heap;
1763 
1764         while (--nseg >= 0)
1765                 vmem_putseg_global(&vmem_seg0[nseg]);
1766 
1767         heap = vmem_create(heap_name,
1768             heap_start, heap_size, heap_quantum,
1769             NULL, NULL, NULL, 0,
1770             VM_SLEEP | VMC_POPULATOR);
1771 
1772         vmem_metadata_arena = vmem_create("vmem_metadata",
1773             NULL, 0, heap_quantum,
1774             vmem_alloc, vmem_free, heap, 8 * heap_quantum,
1775             VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);
1776 
1777         vmem_seg_arena = vmem_create("vmem_seg",
1778             NULL, 0, heap_quantum,
1779             heap_alloc, heap_free, vmem_metadata_arena, 0,
1780             VM_SLEEP | VMC_POPULATOR);
1781 
1782         vmem_hash_arena = vmem_create("vmem_hash",
1783             NULL, 0, 8,
1784             heap_alloc, heap_free, vmem_metadata_arena, 0,
1785             VM_SLEEP);
1786 
1787         vmem_vmem_arena = vmem_create("vmem_vmem",
1788             vmem0, sizeof (vmem0), 1,
1789             heap_alloc, heap_free, vmem_metadata_arena, 0,
1790             VM_SLEEP);
1791 
1792         for (id = 0; id < vmem_id; id++)
1793                 (void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
1794                     1, 0, 0, &vmem0[id], &vmem0[id + 1],
1795                     VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
1796 
1797         return (heap);
1798 }
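
/*
 * A bootstrap sketch: vmem_init() is called once, early in startup, to
 * set up the arena hierarchy.  The call below approximates how the kernel
 * heap might be wired up, assuming segkmem_alloc()/segkmem_free() as the
 * backing functions; the exact arguments are platform-specific.
 *
 *	heap_arena = vmem_init("heap", kernelheap, ekernelheap - kernelheap,
 *	    PAGESIZE, segkmem_alloc, segkmem_free);
 */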