1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
  28  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  29  */
  30 
  31 /*
  32  * Big Theory Statement for the virtual memory allocator.
  33  *
  34  * For a more complete description of the main ideas, see:
  35  *
  36  *      Jeff Bonwick and Jonathan Adams,
  37  *
  38  *      Magazines and vmem: Extending the Slab Allocator to Many CPUs and
  39  *      Arbitrary Resources.
  40  *
  41  *      Proceedings of the 2001 Usenix Conference.
  42  *      Available as http://www.usenix.org/event/usenix01/bonwick.html
  43  *
  44  *
  45  * 1. General Concepts
  46  * -------------------
  47  *
  48  * 1.1 Overview
  49  * ------------
  50  * We divide the kernel address space into a number of logically distinct
  51  * pieces, or *arenas*: text, data, heap, stack, and so on.  Within these
  52  * arenas we often subdivide further; for example, we use heap addresses
  53  * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
  54  * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
  55  * The kernel address space, therefore, is most accurately described as
  56  * a tree of arenas in which each node of the tree *imports* some subset
  57  * of its parent.  The virtual memory allocator manages these arenas and
  58  * supports their natural hierarchical structure.
  59  *
  60  * 1.2 Arenas
  61  * ----------
  62  * An arena is nothing more than a set of integers.  These integers most
  63  * commonly represent virtual addresses, but in fact they can represent
  64  * anything at all.  For example, we could use an arena containing the
  65  * integers minpid through maxpid to allocate process IDs.  vmem_create()
  66  * and vmem_destroy() create and destroy vmem arenas.  In order to
  67  * differentiate between arenas used for addresses and arenas used for
  68  * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create().  This
  69  * prevents identifier exhaustion from being diagnosed as general memory
  70  * failure.
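 *
 * As an illustration (a hypothetical sketch, not code from this file),
 * an identifier arena covering [minpid, maxpid] might be created and
 * used like this:
 *
 *        pid_arena = vmem_create("pid", (void *)minpid,
 *            maxpid - minpid + 1, 1, NULL, NULL, NULL, 0,
 *            VM_SLEEP | VMC_IDENTIFIER);
 *        pid = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1, VM_SLEEP);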
  71  *
  72  * 1.3 Spans
  73  * ---------
  74  * We represent the integers in an arena as a collection of *spans*, or
  75  * contiguous ranges of integers.  For example, the kernel heap consists
  76  * of just one span: [kernelheap, ekernelheap).  Spans can be added to an
  77  * arena in two ways: explicitly, by vmem_add(), or implicitly, by
  78  * importing, as described in Section 1.5 below.
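 *
 * For example, a span of identifiers could be added explicitly like this
 * (a sketch; 'id_arena' is hypothetical):
 *
 *        (void) vmem_add(id_arena, (void *)1, 100, VM_SLEEP);
 *
 * which makes the integers [1, 101) available for allocation.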
  79  *
  80  * 1.4 Segments
  81  * ------------
  82  * Spans are subdivided into *segments*, each of which is either allocated
  83  * or free.  A segment, like a span, is a contiguous range of integers.
  84  * Each allocated segment [addr, addr + size) represents exactly one
  85  * vmem_alloc(size) that returned addr.  Free segments represent the space
  86  * between allocated segments.  If two free segments are adjacent, we
  87  * coalesce them into one larger segment; that is, if segments [a, b) and
  88  * [b, c) are both free, we merge them into a single segment [a, c).
  89  * The segments within a span are linked together in increasing-address order
  90  * so we can easily determine whether coalescing is possible.
  91  *
  92  * Segments never cross span boundaries.  When all segments within
  93  * an imported span become free, we return the span to its source.
  94  *
  95  * 1.5 Imported Memory
  96  * -------------------
  97  * As mentioned in the overview, some arenas are logical subsets of
  98  * other arenas.  For example, kmem_va_arena (a virtual address cache
  99  * that satisfies most kmem_slab_create() requests) is just a subset
 100  * of heap_arena (the kernel heap) that provides caching for the most
 101  * common slab sizes.  When kmem_va_arena runs out of virtual memory,
 102  * it *imports* more from the heap; we say that heap_arena is the
 103  * *vmem source* for kmem_va_arena.  vmem_create() allows you to
 104  * specify any existing vmem arena as the source for your new arena.
 105  * Topologically, since every arena is a child of at most one source,
 106  * the set of all arenas forms a collection of trees.
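 *
 * A child arena that imports virtual addresses from the heap might be
 * created as follows (a sketch; the name and parameters are illustrative):
 *
 *        sub_arena = vmem_create("my_sub", NULL, 0, PAGESIZE,
 *            vmem_alloc, vmem_free, heap_arena, 0, VM_SLEEP);
 *
 * With a NULL base and zero size the arena starts out empty; when it needs
 * space it imports quantum-aligned spans from heap_arena via vmem_alloc().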
 107  *
 108  * 1.6 Constrained Allocations
 109  * ---------------------------
 110  * Some vmem clients are quite picky about the kind of address they want.
 111  * For example, the DVMA code may need an address that is at a particular
 112  * phase with respect to some alignment (to get good cache coloring), or
 113  * that lies within certain limits (the addressable range of a device),
 114  * or that doesn't cross some boundary (a DMA counter restriction) --
 115  * or all of the above.  vmem_xalloc() allows the client to specify any
 116  * or all of these constraints.
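 *
 * For instance, a fully constrained request might look like this
 * (hypothetical sketch; 'dvma_arena' and the constraint values are
 * illustrative, and size is assumed small enough to fit within the
 * nocross boundary):
 *
 *        addr = vmem_xalloc(dvma_arena, size, 0x10000, 0, 0x100000,
 *            NULL, (void *)0x100000000, VM_SLEEP);
 *
 * i.e. 64K-aligned, zero phase, never crossing a 1M boundary, and wholly
 * below 4G.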
 117  *
 118  * 1.7 The Vmem Quantum
 119  * --------------------
 120  * Every arena has a notion of 'quantum', specified at vmem_create() time,
 121  * that defines the arena's minimum unit of currency.  Most commonly the
 122  * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
 123  * All vmem allocations are guaranteed to be quantum-aligned.
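 *
 * For example, in an arena whose quantum is PAGESIZE (assume 4K here), a
 * vmem_alloc() of 5000 bytes carves P2ROUNDUP(5000, 4096) == 8192 bytes
 * out of the free segment it comes from, and the address returned is
 * page-aligned.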
 124  *
 125  * 1.8 Quantum Caching
 126  * -------------------
 127  * A vmem arena may be so hot (frequently used) that the scalability of vmem
 128  * allocation is a significant concern.  We address this by allowing the most
 129  * common allocation sizes to be serviced by the kernel memory allocator,
 130  * which provides low-latency per-cpu caching.  The qcache_max argument to
 131  * vmem_create() specifies the largest allocation size to cache.
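 *
 * A sketch in the spirit of kmem_va_arena described above (the name and
 * parameters are illustrative, not taken from this file):
 *
 *        hot_arena = vmem_create("hot_va", NULL, 0, PAGESIZE,
 *            vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, VM_SLEEP);
 *
 * Allocations of up to 8 pages from hot_arena are then satisfied by
 * per-CPU kmem caches rather than by the arena's own freelists.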
 132  *
 133  * 1.9 Relationship to Kernel Memory Allocator
 134  * -------------------------------------------
 135  * Every kmem cache has a vmem arena as its slab supplier.  The kernel memory
 136  * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
 137  *
 138  *
 139  * 2. Implementation
 140  * -----------------
 141  *
 142  * 2.1 Segment lists and markers
 143  * -----------------------------
 144  * The segment structure (vmem_seg_t) contains two doubly-linked lists.
 145  *
 146  * The arena list (vs_anext/vs_aprev) links all segments in the arena.
 147  * In addition to the allocated and free segments, the arena contains
 148  * special marker segments at span boundaries.  Span markers simplify
 149  * coalescing and importing logic by making it easy to tell both when
 150  * we're at a span boundary (so we don't coalesce across it), and when
 151  * a span is completely free (its neighbors will both be span markers).
 152  *
 153  * Imported spans will have vs_import set.
 154  *
 155  * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
 156  * (1) for allocated segments, vs_knext is the hash chain linkage;
 157  * (2) for free segments, vs_knext is the freelist linkage;
 158  * (3) for span marker segments, vs_knext is the next span marker.
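 *
 * An abridged sketch of the fields involved (see sys/vmem_impl.h for the
 * authoritative definition; only fields used in the discussion above are
 * shown):
 *
 *        struct vmem_seg {
 *                uintptr_t        vs_start;   (start of segment, inclusive)
 *                uintptr_t        vs_end;     (end of segment, exclusive)
 *                struct vmem_seg  *vs_anext;  (arena list linkage)
 *                struct vmem_seg  *vs_aprev;
 *                struct vmem_seg  *vs_knext;  (next-of-kin list linkage)
 *                struct vmem_seg  *vs_kprev;
 *                uint8_t          vs_type;    (VMEM_ALLOC, VMEM_FREE, VMEM_SPAN, ...)
 *                uint8_t          vs_import;  (non-zero for imported spans)
 *        };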
 159  *
 160  * 2.2 Allocation hashing
 161  * ----------------------
 162  * We maintain a hash table of all allocated segments, hashed by address.
 163  * This allows vmem_free() to discover the target segment in constant time.
 164  * vmem_update() periodically resizes hash tables to keep hash chains short.
 165  *
 166  * 2.3 Freelist management
 167  * -----------------------
 168  * We maintain power-of-2 freelists for free segments: a free segment of size
 169  * s, 2^n <= s < 2^(n+1), resides in vmp->vm_freelist[n].  To ensure constant-time
 170  * allocation, vmem_xalloc() looks not in the first freelist that *might*
 171  * satisfy the allocation, but in the first freelist that *definitely*
 172  * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
 173  * freelists are empty).  For example, a 1000-byte allocation will be
 174  * satisfied not from the 512..1023-byte freelist, whose members *might*
 175  * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
 176  * the first member of which will *definitely* satisfy the allocation.
 177  * This ensures that vmem_xalloc() works in constant time.
 178  *
 179  * We maintain a bit map to determine quickly which freelists are non-empty.
 180  * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
 181  *
 182  * The different freelists are linked together into one large freelist,
 183  * with the freelist heads serving as markers.  Freelist markers simplify
 184  * the maintenance of vm_freemap by making it easy to tell when we're taking
 185  * the last member of a freelist (both of its neighbors will be markers).
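 *
 * The instant-fit freelist selection used by vmem_alloc() below boils
 * down to the following (excerpted logic, not a new interface):
 *
 *        if (ISP2(size))
 *                flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
 *        else if ((hb = highbit(size)) < VMEM_FREELISTS)
 *                flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
 *
 * P2ALIGN() clears the vm_freemap bits corresponding to freelists that are
 * too small, and lowbit() then selects the smallest remaining non-empty
 * freelist, every member of which is large enough to satisfy the request.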
 186  *
 187  * 2.4 Vmem Locking
 188  * ----------------
 189  * For simplicity, all arena state is protected by a per-arena lock.
 190  * For very hot arenas, use quantum caching for scalability.
 191  *
 192  * 2.5 Vmem Population
 193  * -------------------
 194  * Any internal vmem routine that might need to allocate new segment
 195  * structures must prepare in advance by calling vmem_populate(), which
 196  * will preallocate enough vmem_seg_t's to get it through the entire
 197  * operation without dropping the arena lock.
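 *
 * The calling pattern, as vmem_nextfit_alloc() below uses it for example,
 * is simply:
 *
 *        mutex_enter(&vmp->vm_lock);
 *        if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
 *                mutex_exit(&vmp->vm_lock);
 *                return (NULL);
 *        }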
 198  *
 199  * 2.6 Auditing
 200  * ------------
 201  * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
 202  * Since virtual addresses cannot be scribbled on, there is no equivalent
 203  * in vmem to redzone checking, deadbeef, or other kmem debugging features.
 204  * Moreover, we do not audit frees because segment coalescing destroys the
 205  * association between an address and its segment structure.  Auditing is
 206  * thus intended primarily to keep track of who's consuming the arena.
 207  * Debugging support could certainly be extended in the future if it proves
 208  * necessary, but we do so much live checking via the allocation hash table
 209  * that even non-DEBUG systems get quite a bit of sanity checking already.
 210  */
 211 
 212 #include <sys/vmem_impl.h>
 213 #include <sys/kmem.h>
 214 #include <sys/kstat.h>
 215 #include <sys/param.h>
 216 #include <sys/systm.h>
 217 #include <sys/atomic.h>
 218 #include <sys/bitmap.h>
 219 #include <sys/sysmacros.h>
 220 #include <sys/cmn_err.h>
 221 #include <sys/debug.h>
 222 #include <sys/panic.h>
 223 
 224 #define VMEM_INITIAL            10      /* early vmem arenas */
 225 #define VMEM_SEG_INITIAL        200     /* early segments */
 226 
 227 /*
 228  * Adding a new span to an arena requires two segment structures: one to
 229  * represent the span, and one to represent the free segment it contains.
 230  */
 231 #define VMEM_SEGS_PER_SPAN_CREATE       2
 232 
 233 /*
 234  * Allocating a piece of an existing segment requires 0-2 segment structures
 235  * depending on how much of the segment we're allocating.
 236  *
 237  * To allocate the entire segment, no new segment structures are needed; we
 238  * simply move the existing segment structure from the freelist to the
 239  * allocation hash table.
 240  *
 241  * To allocate a piece from the left or right end of the segment, we must
 242  * split the segment into two pieces (allocated part and remainder), so we
 243  * need one new segment structure to represent the remainder.
 244  *
 245  * To allocate from the middle of a segment, we need two new segment structures
 246  * to represent the remainders on either side of the allocated part.
 247  */
 248 #define VMEM_SEGS_PER_EXACT_ALLOC       0
 249 #define VMEM_SEGS_PER_LEFT_ALLOC        1
 250 #define VMEM_SEGS_PER_RIGHT_ALLOC       1
 251 #define VMEM_SEGS_PER_MIDDLE_ALLOC      2
 252 
 253 /*
 254  * vmem_populate() preallocates segment structures for vmem to do its work.
 255  * It must preallocate enough for the worst case, which is when we must import
 256  * a new span and then allocate from the middle of it.
 257  */
 258 #define VMEM_SEGS_PER_ALLOC_MAX         \
 259         (VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
 260 
 261 /*
 262  * The segment structures themselves are allocated from vmem_seg_arena, so
 263  * we have a recursion problem when vmem_seg_arena needs to populate itself.
 264  * We address this by working out the maximum number of segment structures
 265  * this act will require, and multiplying by the maximum number of threads
 266  * that we'll allow to do it simultaneously.
 267  *
 268  * The worst-case segment consumption to populate vmem_seg_arena is as
 269  * follows (depicted as a stack trace to indicate why events are occurring):
 270  *
 271  * (In order to lower the fragmentation in the heap_arena, we specify a
 272  * minimum import size for the vmem_metadata_arena which is the same size
 273  * as the kmem_va quantum cache allocations.  This causes the worst-case
 274  * allocation from the vmem_metadata_arena to be 3 segments.)
 275  *
 276  * vmem_alloc(vmem_seg_arena)           -> 2 segs (span create + exact alloc)
 277  *  segkmem_alloc(vmem_metadata_arena)
 278  *   vmem_alloc(vmem_metadata_arena)    -> 3 segs (span create + left alloc)
 279  *    vmem_alloc(heap_arena)            -> 1 seg (left alloc)
 280  *   page_create()
 281  *   hat_memload()
 282  *    kmem_cache_alloc()
 283  *     kmem_slab_create()
 284  *      vmem_alloc(hat_memload_arena)   -> 2 segs (span create + exact alloc)
 285  *       segkmem_alloc(heap_arena)
 286  *        vmem_alloc(heap_arena)        -> 1 seg (left alloc)
 287  *        page_create()
 288  *        hat_memload()         -> (hat layer won't recurse further)
 289  *
 290  * The worst-case consumption for each arena is 3 segment structures.
 291  * Of course, a 3-seg reserve could easily be blown by multiple threads.
 292  * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
 293  * because they're rare).  We cannot allow a non-blocking allocation to get
 294  * tied up behind a blocking allocation, however, so we use separate locks
 295  * for VM_SLEEP and VM_NOSLEEP allocations.  Similarly, VM_PUSHPAGE allocations
 296  * must not block behind ordinary VM_SLEEPs.  In addition, if the system is
 297  * panicking then we must keep enough resources for panic_thread to do its
 298  * work.  Thus we have at most four threads trying to allocate from
 299  * vmem_seg_arena, and each thread consumes at most three segment structures,
 300  * so we must maintain a 12-seg reserve.
 301  */
 302 #define VMEM_POPULATE_RESERVE   12
 303 
 304 /*
 305  * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
 306  * so that it can satisfy the worst-case allocation *and* participate in
 307  * worst-case allocation from vmem_seg_arena.
 308  */
 309 #define VMEM_MINFREE    (VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
 310 
 311 static vmem_t vmem0[VMEM_INITIAL];
 312 static vmem_t *vmem_populator[VMEM_INITIAL];
 313 static uint32_t vmem_id;
 314 static uint32_t vmem_populators;
 315 static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
 316 static vmem_seg_t *vmem_segfree;
 317 static kmutex_t vmem_list_lock;
 318 static kmutex_t vmem_segfree_lock;
 319 static kmutex_t vmem_sleep_lock;
 320 static kmutex_t vmem_nosleep_lock;
 321 static kmutex_t vmem_pushpage_lock;
 322 static kmutex_t vmem_panic_lock;
 323 static vmem_t *vmem_list;
 324 static vmem_t *vmem_metadata_arena;
 325 static vmem_t *vmem_seg_arena;
 326 static vmem_t *vmem_hash_arena;
 327 static vmem_t *vmem_vmem_arena;
 328 static long vmem_update_interval = 15;  /* vmem_update() every 15 seconds */
 329 uint32_t vmem_mtbf;             /* mean time between failures [default: off] */
 330 size_t vmem_seg_size = sizeof (vmem_seg_t);
 331 
 332 static vmem_kstat_t vmem_kstat_template = {
 333         { "mem_inuse",          KSTAT_DATA_UINT64 },
 334         { "mem_import",         KSTAT_DATA_UINT64 },
 335         { "mem_total",          KSTAT_DATA_UINT64 },
 336         { "vmem_source",        KSTAT_DATA_UINT32 },
 337         { "alloc",              KSTAT_DATA_UINT64 },
 338         { "free",               KSTAT_DATA_UINT64 },
 339         { "wait",               KSTAT_DATA_UINT64 },
 340         { "fail",               KSTAT_DATA_UINT64 },
 341         { "lookup",             KSTAT_DATA_UINT64 },
 342         { "search",             KSTAT_DATA_UINT64 },
 343         { "populate_wait",      KSTAT_DATA_UINT64 },
 344         { "populate_fail",      KSTAT_DATA_UINT64 },
 345         { "contains",           KSTAT_DATA_UINT64 },
 346         { "contains_search",    KSTAT_DATA_UINT64 },
 347 };
 348 
 349 /*
 350  * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
 351  */
 352 #define VMEM_INSERT(vprev, vsp, type)                                   \
 353 {                                                                       \
 354         vmem_seg_t *vnext = (vprev)->vs_##type##next;                        \
 355         (vsp)->vs_##type##next = (vnext);                            \
 356         (vsp)->vs_##type##prev = (vprev);                            \
 357         (vprev)->vs_##type##next = (vsp);                            \
 358         (vnext)->vs_##type##prev = (vsp);                            \
 359 }
 360 
 361 #define VMEM_DELETE(vsp, type)                                          \
 362 {                                                                       \
 363         vmem_seg_t *vprev = (vsp)->vs_##type##prev;                  \
 364         vmem_seg_t *vnext = (vsp)->vs_##type##next;                  \
 365         (vprev)->vs_##type##next = (vnext);                          \
 366         (vnext)->vs_##type##prev = (vprev);                          \
 367 }
 368 
 369 /*
 370  * Get a vmem_seg_t from the global segfree list.
 371  */
 372 static vmem_seg_t *
 373 vmem_getseg_global(void)
 374 {
 375         vmem_seg_t *vsp;
 376 
 377         mutex_enter(&vmem_segfree_lock);
 378         if ((vsp = vmem_segfree) != NULL)
 379                 vmem_segfree = vsp->vs_knext;
 380         mutex_exit(&vmem_segfree_lock);
 381 
 382         return (vsp);
 383 }
 384 
 385 /*
 386  * Put a vmem_seg_t on the global segfree list.
 387  */
 388 static void
 389 vmem_putseg_global(vmem_seg_t *vsp)
 390 {
 391         mutex_enter(&vmem_segfree_lock);
 392         vsp->vs_knext = vmem_segfree;
 393         vmem_segfree = vsp;
 394         mutex_exit(&vmem_segfree_lock);
 395 }
 396 
 397 /*
 398  * Get a vmem_seg_t from vmp's segfree list.
 399  */
 400 static vmem_seg_t *
 401 vmem_getseg(vmem_t *vmp)
 402 {
 403         vmem_seg_t *vsp;
 404 
 405         ASSERT(vmp->vm_nsegfree > 0);
 406 
 407         vsp = vmp->vm_segfree;
 408         vmp->vm_segfree = vsp->vs_knext;
 409         vmp->vm_nsegfree--;
 410 
 411         return (vsp);
 412 }
 413 
 414 /*
 415  * Put a vmem_seg_t on vmp's segfree list.
 416  */
 417 static void
 418 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
 419 {
 420         vsp->vs_knext = vmp->vm_segfree;
 421         vmp->vm_segfree = vsp;
 422         vmp->vm_nsegfree++;
 423 }
 424 
 425 /*
 426  * Add vsp to the appropriate freelist.
 427  */
 428 static void
 429 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
 430 {
 431         vmem_seg_t *vprev;
 432 
 433         ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
 434 
 435         vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
 436         vsp->vs_type = VMEM_FREE;
 437         vmp->vm_freemap |= VS_SIZE(vprev);
 438         VMEM_INSERT(vprev, vsp, k);
 439 
 440         cv_broadcast(&vmp->vm_cv);
 441 }
 442 
 443 /*
 444  * Take vsp from the freelist.
 445  */
 446 static void
 447 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
 448 {
 449         ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
 450         ASSERT(vsp->vs_type == VMEM_FREE);
 451 
 452         if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
 453                 /*
 454                  * The segments on both sides of 'vsp' are freelist heads,
 455                  * so taking vsp leaves the freelist at vsp->vs_kprev empty.
 456                  */
 457                 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
 458                 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
 459         }
 460         VMEM_DELETE(vsp, k);
 461 }
 462 
 463 /*
 464  * Add vsp to the allocated-segment hash table and update kstats.
 465  */
 466 static void
 467 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
 468 {
 469         vmem_seg_t **bucket;
 470 
 471         vsp->vs_type = VMEM_ALLOC;
 472         bucket = VMEM_HASH(vmp, vsp->vs_start);
 473         vsp->vs_knext = *bucket;
 474         *bucket = vsp;
 475 
 476         if (vmem_seg_size == sizeof (vmem_seg_t)) {
 477                 vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
 478                     VMEM_STACK_DEPTH);
 479                 vsp->vs_thread = curthread;
 480                 vsp->vs_timestamp = gethrtime();
 481         } else {
 482                 vsp->vs_depth = 0;
 483         }
 484 
 485         vmp->vm_kstat.vk_alloc.value.ui64++;
 486         vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
 487 }
 488 
 489 /*
 490  * Remove vsp from the allocated-segment hash table and update kstats.
 491  */
 492 static vmem_seg_t *
 493 vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
 494 {
 495         vmem_seg_t *vsp, **prev_vspp;
 496 
 497         prev_vspp = VMEM_HASH(vmp, addr);
 498         while ((vsp = *prev_vspp) != NULL) {
 499                 if (vsp->vs_start == addr) {
 500                         *prev_vspp = vsp->vs_knext;
 501                         break;
 502                 }
 503                 vmp->vm_kstat.vk_lookup.value.ui64++;
 504                 prev_vspp = &vsp->vs_knext;
 505         }
 506 
 507         if (vsp == NULL)
 508                 panic("vmem_hash_delete(%p, %lx, %lu): bad free",
 509                     (void *)vmp, addr, size);
 510         if (VS_SIZE(vsp) != size)
 511                 panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
 512                     (void *)vmp, addr, size, VS_SIZE(vsp));
 513 
 514         vmp->vm_kstat.vk_free.value.ui64++;
 515         vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;
 516 
 517         return (vsp);
 518 }
 519 
 520 /*
 521  * Create a segment spanning the range [start, end) and add it to the arena.
 522  */
 523 static vmem_seg_t *
 524 vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
 525 {
 526         vmem_seg_t *newseg = vmem_getseg(vmp);
 527 
 528         newseg->vs_start = start;
 529         newseg->vs_end = end;
 530         newseg->vs_type = 0;
 531         newseg->vs_import = 0;
 532 
 533         VMEM_INSERT(vprev, newseg, a);
 534 
 535         return (newseg);
 536 }
 537 
 538 /*
 539  * Remove segment vsp from the arena.
 540  */
 541 static void
 542 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
 543 {
 544         ASSERT(vsp->vs_type != VMEM_ROTOR);
 545         VMEM_DELETE(vsp, a);
 546 
 547         vmem_putseg(vmp, vsp);
 548 }
 549 
 550 /*
 551  * Add the span [vaddr, vaddr + size) to vmp and update kstats.
 552  */
 553 static vmem_seg_t *
 554 vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
 555 {
 556         vmem_seg_t *newseg, *span;
 557         uintptr_t start = (uintptr_t)vaddr;
 558         uintptr_t end = start + size;
 559 
 560         ASSERT(MUTEX_HELD(&vmp->vm_lock));
 561 
 562         if ((start | end) & (vmp->vm_quantum - 1))
 563                 panic("vmem_span_create(%p, %p, %lu): misaligned",
 564                     (void *)vmp, vaddr, size);
 565 
 566         span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
 567         span->vs_type = VMEM_SPAN;
 568         span->vs_import = import;
 569         VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);
 570 
 571         newseg = vmem_seg_create(vmp, span, start, end);
 572         vmem_freelist_insert(vmp, newseg);
 573 
 574         if (import)
 575                 vmp->vm_kstat.vk_mem_import.value.ui64 += size;
 576         vmp->vm_kstat.vk_mem_total.value.ui64 += size;
 577 
 578         return (newseg);
 579 }
 580 
 581 /*
 582  * Remove span vsp from vmp and update kstats.
 583  */
 584 static void
 585 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
 586 {
 587         vmem_seg_t *span = vsp->vs_aprev;
 588         size_t size = VS_SIZE(vsp);
 589 
 590         ASSERT(MUTEX_HELD(&vmp->vm_lock));
 591         ASSERT(span->vs_type == VMEM_SPAN);
 592 
 593         if (span->vs_import)
 594                 vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
 595         vmp->vm_kstat.vk_mem_total.value.ui64 -= size;
 596 
 597         VMEM_DELETE(span, k);
 598 
 599         vmem_seg_destroy(vmp, vsp);
 600         vmem_seg_destroy(vmp, span);
 601 }
 602 
 603 /*
 604  * Allocate the subrange [addr, addr + size) from segment vsp.
 605  * If there are leftovers on either side, place them on the freelist.
 606  * Returns a pointer to the segment representing [addr, addr + size).
 607  */
 608 static vmem_seg_t *
 609 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
 610 {
 611         uintptr_t vs_start = vsp->vs_start;
 612         uintptr_t vs_end = vsp->vs_end;
 613         size_t vs_size = vs_end - vs_start;
 614         size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
 615         uintptr_t addr_end = addr + realsize;
 616 
 617         ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
 618         ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
 619         ASSERT(vsp->vs_type == VMEM_FREE);
 620         ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
 621         ASSERT(addr - 1 <= addr_end - 1);
 622 
 623         /*
 624          * If we're allocating from the start of the segment, and the
 625          * remainder will be on the same freelist, we can save quite
 626          * a bit of work.
 627          */
 628         if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
 629                 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
 630                 vsp->vs_start = addr_end;
 631                 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
 632                 vmem_hash_insert(vmp, vsp);
 633                 return (vsp);
 634         }
 635 
 636         vmem_freelist_delete(vmp, vsp);
 637 
 638         if (vs_end != addr_end)
 639                 vmem_freelist_insert(vmp,
 640                     vmem_seg_create(vmp, vsp, addr_end, vs_end));
 641 
 642         if (vs_start != addr)
 643                 vmem_freelist_insert(vmp,
 644                     vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
 645 
 646         vsp->vs_start = addr;
 647         vsp->vs_end = addr + size;
 648 
 649         vmem_hash_insert(vmp, vsp);
 650         return (vsp);
 651 }
 652 
 653 /*
 654  * Returns 1 if we are populating, 0 otherwise.
 655  * Call it when we want to prevent recursion from the HAT layer.
 656  */
 657 int
 658 vmem_is_populator()
 659 {
 660         return (mutex_owner(&vmem_sleep_lock) == curthread ||
 661             mutex_owner(&vmem_nosleep_lock) == curthread ||
 662             mutex_owner(&vmem_pushpage_lock) == curthread ||
 663             mutex_owner(&vmem_panic_lock) == curthread);
 664 }
 665 
 666 /*
 667  * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
 668  */
 669 static int
 670 vmem_populate(vmem_t *vmp, int vmflag)
 671 {
 672         char *p;
 673         vmem_seg_t *vsp;
 674         ssize_t nseg;
 675         size_t size;
 676         kmutex_t *lp;
 677         int i;
 678 
 679         while (vmp->vm_nsegfree < VMEM_MINFREE &&
 680             (vsp = vmem_getseg_global()) != NULL)
 681                 vmem_putseg(vmp, vsp);
 682 
 683         if (vmp->vm_nsegfree >= VMEM_MINFREE)
 684                 return (1);
 685 
 686         /*
 687          * If we're already populating, tap the reserve.
 688          */
 689         if (vmem_is_populator()) {
 690                 ASSERT(vmp->vm_cflags & VMC_POPULATOR);
 691                 return (1);
 692         }
 693 
 694         mutex_exit(&vmp->vm_lock);
 695 
 696         if (panic_thread == curthread)
 697                 lp = &vmem_panic_lock;
 698         else if (vmflag & VM_NOSLEEP)
 699                 lp = &vmem_nosleep_lock;
 700         else if (vmflag & VM_PUSHPAGE)
 701                 lp = &vmem_pushpage_lock;
 702         else
 703                 lp = &vmem_sleep_lock;
 704 
 705         mutex_enter(lp);
 706 
 707         nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
 708         size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
 709         nseg = size / vmem_seg_size;
 710 
 711         /*
 712          * The following vmem_alloc() may need to populate vmem_seg_arena
 713          * and all the things it imports from.  When doing so, it will tap
 714          * each arena's reserve to prevent recursion (see the block comment
 715          * above the definition of VMEM_POPULATE_RESERVE).
 716          */
 717         p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
 718         if (p == NULL) {
 719                 mutex_exit(lp);
 720                 mutex_enter(&vmp->vm_lock);
 721                 vmp->vm_kstat.vk_populate_fail.value.ui64++;
 722                 return (0);
 723         }
 724 
 725         /*
 726          * Restock the arenas that may have been depleted during population.
 727          */
 728         for (i = 0; i < vmem_populators; i++) {
 729                 mutex_enter(&vmem_populator[i]->vm_lock);
 730                 while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
 731                         vmem_putseg(vmem_populator[i],
 732                             (vmem_seg_t *)(p + --nseg * vmem_seg_size));
 733                 mutex_exit(&vmem_populator[i]->vm_lock);
 734         }
 735 
 736         mutex_exit(lp);
 737         mutex_enter(&vmp->vm_lock);
 738 
 739         /*
 740          * Now take our own segments.
 741          */
 742         ASSERT(nseg >= VMEM_MINFREE);
 743         while (vmp->vm_nsegfree < VMEM_MINFREE)
 744                 vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
 745 
 746         /*
 747          * Give the remainder to charity.
 748          */
 749         while (nseg > 0)
 750                 vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));
 751 
 752         return (1);
 753 }
 754 
 755 /*
 756  * Advance a walker from its previous position to 'afterme'.
 757  * Note: may drop and reacquire vmp->vm_lock.
 758  */
 759 static void
 760 vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
 761 {
 762         vmem_seg_t *vprev = walker->vs_aprev;
 763         vmem_seg_t *vnext = walker->vs_anext;
 764         vmem_seg_t *vsp = NULL;
 765 
 766         VMEM_DELETE(walker, a);
 767 
 768         if (afterme != NULL)
 769                 VMEM_INSERT(afterme, walker, a);
 770 
 771         /*
 772          * The walker segment's presence may have prevented its neighbors
 773          * from coalescing.  If so, coalesce them now.
 774          */
 775         if (vprev->vs_type == VMEM_FREE) {
 776                 if (vnext->vs_type == VMEM_FREE) {
 777                         ASSERT(vprev->vs_end == vnext->vs_start);
 778                         vmem_freelist_delete(vmp, vnext);
 779                         vmem_freelist_delete(vmp, vprev);
 780                         vprev->vs_end = vnext->vs_end;
 781                         vmem_freelist_insert(vmp, vprev);
 782                         vmem_seg_destroy(vmp, vnext);
 783                 }
 784                 vsp = vprev;
 785         } else if (vnext->vs_type == VMEM_FREE) {
 786                 vsp = vnext;
 787         }
 788 
 789         /*
 790          * vsp could represent a complete imported span,
 791          * in which case we must return it to the source.
 792          */
 793         if (vsp != NULL && vsp->vs_aprev->vs_import &&
 794             vmp->vm_source_free != NULL &&
 795             vsp->vs_aprev->vs_type == VMEM_SPAN &&
 796             vsp->vs_anext->vs_type == VMEM_SPAN) {
 797                 void *vaddr = (void *)vsp->vs_start;
 798                 size_t size = VS_SIZE(vsp);
 799                 ASSERT(size == VS_SIZE(vsp->vs_aprev));
 800                 vmem_freelist_delete(vmp, vsp);
 801                 vmem_span_destroy(vmp, vsp);
 802                 mutex_exit(&vmp->vm_lock);
 803                 vmp->vm_source_free(vmp->vm_source, vaddr, size);
 804                 mutex_enter(&vmp->vm_lock);
 805         }
 806 }
 807 
 808 /*
 809  * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
 810  * in an arena, so that we avoid reusing addresses for as long as possible.
 811  * This helps to catch use-after-free bugs.  It's also the perfect policy
 812  * for allocating things like process IDs, where we want to cycle through
 813  * all values in order.
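 *
 * For instance, with a hypothetical identifier arena for process IDs
 * (as sketched in section 1.2 of the Big Theory Statement), a caller
 * would cycle through the ID space like this:
 *
 *        pid = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1,
 *            VM_SLEEP | VM_NEXTFIT);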
 814  */
 815 static void *
 816 vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
 817 {
 818         vmem_seg_t *vsp, *rotor;
 819         uintptr_t addr;
 820         size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
 821         size_t vs_size;
 822 
 823         mutex_enter(&vmp->vm_lock);
 824 
 825         if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
 826                 mutex_exit(&vmp->vm_lock);
 827                 return (NULL);
 828         }
 829 
 830         /*
 831          * The common case is that the segment right after the rotor is free,
 832          * and large enough that extracting 'size' bytes won't change which
 833          * freelist it's on.  In this case we can avoid a *lot* of work.
 834          * Instead of the normal vmem_seg_alloc(), we just advance the start
 835          * address of the victim segment.  Instead of moving the rotor, we
 836          * create the new segment structure *behind the rotor*, which has
 837          * the same effect.  And finally, we know we don't have to coalesce
 838          * the rotor's neighbors because the new segment lies between them.
 839          */
 840         rotor = &vmp->vm_rotor;
 841         vsp = rotor->vs_anext;
 842         if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
 843             P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
 844                 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
 845                 addr = vsp->vs_start;
 846                 vsp->vs_start = addr + realsize;
 847                 vmem_hash_insert(vmp,
 848                     vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
 849                 mutex_exit(&vmp->vm_lock);
 850                 return ((void *)addr);
 851         }
 852 
 853         /*
 854          * Starting at the rotor, look for a segment large enough to
 855          * satisfy the allocation.
 856          */
 857         for (;;) {
 858                 vmp->vm_kstat.vk_search.value.ui64++;
 859                 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
 860                         break;
 861                 vsp = vsp->vs_anext;
 862                 if (vsp == rotor) {
 863                         /*
 864                           * We've come full circle.  One possibility is that
 865                          * there's actually enough space, but the rotor itself
 866                          * is preventing the allocation from succeeding because
 867                          * it's sitting between two free segments.  Therefore,
 868                          * we advance the rotor and see if that liberates a
 869                          * suitable segment.
 870                          */
 871                         vmem_advance(vmp, rotor, rotor->vs_anext);
 872                         vsp = rotor->vs_aprev;
 873                         if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
 874                                 break;
 875                         /*
 876                          * If there's a lower arena we can import from, or it's
 877                          * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
 878                          * Otherwise, wait until another thread frees something.
 879                          */
 880                         if (vmp->vm_source_alloc != NULL ||
 881                             (vmflag & VM_NOSLEEP)) {
 882                                 mutex_exit(&vmp->vm_lock);
 883                                 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
 884                                     0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
 885                         }
 886                         vmp->vm_kstat.vk_wait.value.ui64++;
 887                         cv_wait(&vmp->vm_cv, &vmp->vm_lock);
 888                         vsp = rotor->vs_anext;
 889                 }
 890         }
 891 
 892         /*
 893          * We found a segment.  Extract enough space to satisfy the allocation.
 894          */
 895         addr = vsp->vs_start;
 896         vsp = vmem_seg_alloc(vmp, vsp, addr, size);
 897         ASSERT(vsp->vs_type == VMEM_ALLOC &&
 898             vsp->vs_start == addr && vsp->vs_end == addr + size);
 899 
 900         /*
 901          * Advance the rotor to right after the newly-allocated segment.
 902          * That's where the next VM_NEXTFIT allocation will begin searching.
 903          */
 904         vmem_advance(vmp, rotor, vsp);
 905         mutex_exit(&vmp->vm_lock);
 906         return ((void *)addr);
 907 }
 908 
 909 /*
 910  * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
 911  * freelist.  If size is not a power-of-2, it can return a false-negative.
 912  *
 913  * Used to decide if a newly imported span is superfluous after re-acquiring
 914  * the arena lock.
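 *
 * For example, with size == 1000 (not a power of 2) only the freelists
 * whose members are at least 1024 bytes are consulted, so a lone
 * 1000-byte free segment sitting on the 512..1023-byte freelist would be
 * missed; hence the possible false-negative.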
 915  */
 916 static int
 917 vmem_canalloc(vmem_t *vmp, size_t size)
 918 {
 919         int hb;
 920         int flist = 0;
 921         ASSERT(MUTEX_HELD(&vmp->vm_lock));
 922 
 923         if (ISP2(size))
 924                 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
 925         else if ((hb = highbit(size)) < VMEM_FREELISTS)
 926                 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
 927 
 928         return (flist);
 929 }
 930 
 931 /*
 932  * Allocate size bytes at offset phase from an align boundary such that the
 933  * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
 934  * that does not straddle a nocross-aligned boundary.
 935  */
 936 void *
 937 vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
 938     size_t nocross, void *minaddr, void *maxaddr, int vmflag)
 939 {
 940         vmem_seg_t *vsp;
 941         vmem_seg_t *vbest = NULL;
 942         uintptr_t addr, taddr, start, end;
 943         uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
 944         void *vaddr, *xvaddr = NULL;
 945         size_t xsize;
 946         int hb, flist, resv;
 947         uint32_t mtbf;
 948 
 949         if ((align | phase | nocross) & (vmp->vm_quantum - 1))
 950                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
 951                     "parameters not vm_quantum aligned",
 952                     (void *)vmp, size, align_arg, phase, nocross,
 953                     minaddr, maxaddr, vmflag);
 954 
 955         if (nocross != 0 &&
 956             (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
 957                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
 958                     "overconstrained allocation",
 959                     (void *)vmp, size, align_arg, phase, nocross,
 960                     minaddr, maxaddr, vmflag);
 961 
 962         if (phase >= align || !ISP2(align) || !ISP2(nocross))
 963                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
 964                     "parameters inconsistent or invalid",
 965                     (void *)vmp, size, align_arg, phase, nocross,
 966                     minaddr, maxaddr, vmflag);
 967 
 968         if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
 969             (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
 970                 return (NULL);
 971 
 972         mutex_enter(&vmp->vm_lock);
 973         for (;;) {
 974                 if (vmp->vm_nsegfree < VMEM_MINFREE &&
 975                     !vmem_populate(vmp, vmflag))
 976                         break;
 977 do_alloc:
 978                 /*
 979                  * highbit() returns the highest bit + 1, which is exactly
 980                  * what we want: we want to search the first freelist whose
 981                  * members are *definitely* large enough to satisfy our
 982                  * allocation.  However, there are certain cases in which we
 983                  * want to look at the next-smallest freelist (which *might*
 984                  * be able to satisfy the allocation):
 985                  *
 986                  * (1)  The size is exactly a power of 2, in which case
 987                  *      the smaller freelist is always big enough;
 988                  *
 989                  * (2)  All other freelists are empty;
 990                  *
 991                  * (3)  We're in the highest possible freelist, which is
 992                  *      always empty (e.g. the 4GB freelist on 32-bit systems);
 993                  *
 994                  * (4)  We're doing a best-fit or first-fit allocation.
 995                  */
 996                 if (ISP2(size)) {
 997                         flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
 998                 } else {
 999                         hb = highbit(size);
1000                         if ((vmp->vm_freemap >> hb) == 0 ||
1001                             hb == VMEM_FREELISTS ||
1002                             (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
1003                                 hb--;
1004                         flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1005                 }
1006 
1007                 for (vbest = NULL, vsp = (flist == 0) ? NULL :
1008                     vmp->vm_freelist[flist - 1].vs_knext;
1009                     vsp != NULL; vsp = vsp->vs_knext) {
1010                         vmp->vm_kstat.vk_search.value.ui64++;
1011                         if (vsp->vs_start == 0) {
1012                                 /*
1013                                  * We're moving up to a larger freelist,
1014                                  * so if we've already found a candidate,
1015                                  * the fit can't possibly get any better.
1016                                  */
1017                                 if (vbest != NULL)
1018                                         break;
1019                                 /*
1020                                  * Find the next non-empty freelist.
1021                                  */
1022                                 flist = lowbit(P2ALIGN(vmp->vm_freemap,
1023                                     VS_SIZE(vsp)));
1024                                 if (flist-- == 0)
1025                                         break;
1026                                 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
1027                                 ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
1028                                 continue;
1029                         }
1030                         if (vsp->vs_end - 1 < (uintptr_t)minaddr)
1031                                 continue;
1032                         if (vsp->vs_start > (uintptr_t)maxaddr - 1)
1033                                 continue;
1034                         start = MAX(vsp->vs_start, (uintptr_t)minaddr);
1035                         end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
1036                         taddr = P2PHASEUP(start, align, phase);
1037                         if (P2BOUNDARY(taddr, size, nocross))
1038                                 taddr +=
1039                                     P2ROUNDUP(P2NPHASE(taddr, nocross), align);
1040                         if ((taddr - start) + size > end - start ||
1041                             (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
1042                                 continue;
1043                         vbest = vsp;
1044                         addr = taddr;
1045                         if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
1046                                 break;
1047                 }
1048                 if (vbest != NULL)
1049                         break;
1050                 ASSERT(xvaddr == NULL);
1051                 if (size == 0)
1052                         panic("vmem_xalloc(): size == 0");
1053                 if (vmp->vm_source_alloc != NULL && nocross == 0 &&
1054                     minaddr == NULL && maxaddr == NULL) {
1055                         size_t aneeded, asize;
1056                         size_t aquantum = MAX(vmp->vm_quantum,
1057                             vmp->vm_source->vm_quantum);
1058                         size_t aphase = phase;
1059                         if ((align > aquantum) &&
1060                             !(vmp->vm_cflags & VMC_XALIGN)) {
1061                                 aphase = (P2PHASE(phase, aquantum) != 0) ?
1062                                     align - vmp->vm_quantum : align - aquantum;
1063                                 ASSERT(aphase >= phase);
1064                         }
1065                         aneeded = MAX(size + aphase, vmp->vm_min_import);
1066                         asize = P2ROUNDUP(aneeded, aquantum);
1067 
1068                         if (asize < size) {
1069                                 /*
1070                                  * The rounding induced overflow; return NULL
1071                                  * if we are permitted to fail the allocation
1072                                  * (and explicitly panic if we aren't).
1073                                  */
1074                                 if ((vmflag & VM_NOSLEEP) &&
1075                                     !(vmflag & VM_PANIC)) {
1076                                         mutex_exit(&vmp->vm_lock);
1077                                         return (NULL);
1078                                 }
1079 
1080                                 panic("vmem_xalloc(): size overflow");
1081                         }
1082 
1083                         /*
1084                          * Determine how many segment structures we'll consume.
1085                          * The calculation must be precise because if we're
1086                          * here on behalf of vmem_populate(), we are taking
1087                          * segments from a very limited reserve.
1088                          */
1089                         if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
1090                                 resv = VMEM_SEGS_PER_SPAN_CREATE +
1091                                     VMEM_SEGS_PER_EXACT_ALLOC;
1092                         else if (phase == 0 &&
1093                             align <= vmp->vm_source->vm_quantum)
1094                                 resv = VMEM_SEGS_PER_SPAN_CREATE +
1095                                     VMEM_SEGS_PER_LEFT_ALLOC;
1096                         else
1097                                 resv = VMEM_SEGS_PER_ALLOC_MAX;
1098 
1099                         ASSERT(vmp->vm_nsegfree >= resv);
1100                         vmp->vm_nsegfree -= resv;    /* reserve our segs */
1101                         mutex_exit(&vmp->vm_lock);
1102                         if (vmp->vm_cflags & VMC_XALLOC) {
1103                                 size_t oasize = asize;
1104                                 vaddr = ((vmem_ximport_t *)
1105                                     vmp->vm_source_alloc)(vmp->vm_source,
1106                                     &asize, align, vmflag & VM_KMFLAGS);
1107                                 ASSERT(asize >= oasize);
1108                                 ASSERT(P2PHASE(asize,
1109                                     vmp->vm_source->vm_quantum) == 0);
1110                                 ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
1111                                     IS_P2ALIGNED(vaddr, align));
1112                         } else {
1113                                 vaddr = vmp->vm_source_alloc(vmp->vm_source,
1114                                     asize, vmflag & VM_KMFLAGS);
1115                         }
1116                         mutex_enter(&vmp->vm_lock);
1117                         vmp->vm_nsegfree += resv;    /* claim reservation */
1118                         aneeded = size + align - vmp->vm_quantum;
1119                         aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
1120                         if (vaddr != NULL) {
1121                                 /*
1122                                  * Since we dropped the vmem lock while
1123                                  * calling the import function, other
1124                                  * threads could have imported space
1125                                  * and made our import unnecessary.  In
1126                                  * order to save space, we return
1127                                  * excess imports immediately.
1128                                  */
1129                                 if (asize > aneeded &&
1130                                     vmp->vm_source_free != NULL &&
1131                                     vmem_canalloc(vmp, aneeded)) {
1132                                         ASSERT(resv >=
1133                                             VMEM_SEGS_PER_MIDDLE_ALLOC);
1134                                         xvaddr = vaddr;
1135                                         xsize = asize;
1136                                         goto do_alloc;
1137                                 }
1138                                 vbest = vmem_span_create(vmp, vaddr, asize, 1);
1139                                 addr = P2PHASEUP(vbest->vs_start, align, phase);
1140                                 break;
1141                         } else if (vmem_canalloc(vmp, aneeded)) {
1142                                 /*
1143                                  * Our import failed, but another thread
1144                                  * added sufficient free memory to the arena
1145                                  * to satisfy our request.  Go back and
1146                                  * grab it.
1147                                  */
1148                                 ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
1149                                 goto do_alloc;
1150                         }
1151                 }
1152 
1153                 /*
1154                  * If the requestor chooses to fail the allocation attempt
1155                  * rather than reap, wait, and retry, get out of the loop.
1156                  */
1157                 if (vmflag & VM_ABORT)
1158                         break;
1159                 mutex_exit(&vmp->vm_lock);
1160                 if (vmp->vm_cflags & VMC_IDENTIFIER)
1161                         kmem_reap_idspace();
1162                 else
1163                         kmem_reap();
1164                 mutex_enter(&vmp->vm_lock);
1165                 if (vmflag & VM_NOSLEEP)
1166                         break;
1167                 vmp->vm_kstat.vk_wait.value.ui64++;
1168                 cv_wait(&vmp->vm_cv, &vmp->vm_lock);
1169         }
1170         if (vbest != NULL) {
1171                 ASSERT(vbest->vs_type == VMEM_FREE);
1172                 ASSERT(vbest->vs_knext != vbest);
1173                 /* re-position to end of buffer */
1174                 if (vmflag & VM_ENDALLOC) {
1175                         addr += ((vbest->vs_end - (addr + size)) / align) *
1176                             align;
1177                 }
1178                 (void) vmem_seg_alloc(vmp, vbest, addr, size);
1179                 mutex_exit(&vmp->vm_lock);
1180                 if (xvaddr)
1181                         vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
1182                 ASSERT(P2PHASE(addr, align) == phase);
1183                 ASSERT(!P2BOUNDARY(addr, size, nocross));
1184                 ASSERT(addr >= (uintptr_t)minaddr);
1185                 ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
1186                 return ((void *)addr);
1187         }
1188         vmp->vm_kstat.vk_fail.value.ui64++;
1189         mutex_exit(&vmp->vm_lock);
1190         if (vmflag & VM_PANIC)
1191                 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
1192                     "cannot satisfy mandatory allocation",
1193                     (void *)vmp, size, align_arg, phase, nocross,
1194                     minaddr, maxaddr, vmflag);
1195         ASSERT(xvaddr == NULL);
1196         return (NULL);
1197 }
1198 
1199 /*
1200  * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
1201  * allocation.  vmem_xalloc() and vmem_xfree() must always be paired because
1202  * both routines bypass the quantum caches.
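 *
 * A typical pairing (sketch; 'arena', 'size' and 'align' stand for
 * whatever the caller used in the original constrained allocation):
 *
 *        addr = vmem_xalloc(arena, size, align, 0, 0, NULL, NULL, VM_SLEEP);
 *        ...
 *        vmem_xfree(arena, addr, size);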
1203  */
1204 void
1205 vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1206 {
1207         vmem_seg_t *vsp, *vnext, *vprev;
1208 
1209         mutex_enter(&vmp->vm_lock);
1210 
1211         vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1212         vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1213 
1214         /*
1215          * Attempt to coalesce with the next segment.
1216          */
1217         vnext = vsp->vs_anext;
1218         if (vnext->vs_type == VMEM_FREE) {
1219                 ASSERT(vsp->vs_end == vnext->vs_start);
1220                 vmem_freelist_delete(vmp, vnext);
1221                 vsp->vs_end = vnext->vs_end;
1222                 vmem_seg_destroy(vmp, vnext);
1223         }
1224 
1225         /*
1226          * Attempt to coalesce with the previous segment.
1227          */
1228         vprev = vsp->vs_aprev;
1229         if (vprev->vs_type == VMEM_FREE) {
1230                 ASSERT(vprev->vs_end == vsp->vs_start);
1231                 vmem_freelist_delete(vmp, vprev);
1232                 vprev->vs_end = vsp->vs_end;
1233                 vmem_seg_destroy(vmp, vsp);
1234                 vsp = vprev;
1235         }
1236 
1237         /*
1238          * If the entire span is free, return it to the source.
1239          */
1240         if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
1241             vsp->vs_aprev->vs_type == VMEM_SPAN &&
1242             vsp->vs_anext->vs_type == VMEM_SPAN) {
1243                 vaddr = (void *)vsp->vs_start;
1244                 size = VS_SIZE(vsp);
1245                 ASSERT(size == VS_SIZE(vsp->vs_aprev));
1246                 vmem_span_destroy(vmp, vsp);
1247                 mutex_exit(&vmp->vm_lock);
1248                 vmp->vm_source_free(vmp->vm_source, vaddr, size);
1249         } else {
1250                 vmem_freelist_insert(vmp, vsp);
1251                 mutex_exit(&vmp->vm_lock);
1252         }
1253 }
1254 
1255 /*
1256  * Allocate size bytes from arena vmp.  Returns the allocated address
1257  * on success, NULL on failure.  vmflag specifies VM_SLEEP or VM_NOSLEEP,
1258  * and may also specify best-fit, first-fit, or next-fit allocation policy
1259  * instead of the default instant-fit policy.  VM_SLEEP allocations are
1260  * guaranteed to succeed.
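 *
 * A hypothetical caller (illustrative only) might do:
 *
 *        buf = vmem_alloc(my_arena, 3 * PAGESIZE, VM_NOSLEEP);
 *        if (buf != NULL) {
 *                ...
 *                vmem_free(my_arena, buf, 3 * PAGESIZE);
 *        }
 *
 * Note that the caller must pass the same size to vmem_free() that it
 * passed to vmem_alloc(); vmem_hash_delete() panics on a size mismatch.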
1261  */
1262 void *
1263 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1264 {
1265         vmem_seg_t *vsp;
1266         uintptr_t addr;
1267         int hb;
1268         int flist = 0;
1269         uint32_t mtbf;
1270 
1271         if (size - 1 < vmp->vm_qcache_max)
1272                 return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1273                     vmp->vm_qshift], vmflag & VM_KMFLAGS));
1274 
1275         if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1276             (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
1277                 return (NULL);
1278 
1279         if (vmflag & VM_NEXTFIT)
1280                 return (vmem_nextfit_alloc(vmp, size, vmflag));
1281 
1282         if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
1283                 return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1284                     NULL, NULL, vmflag));
1285 
1286         /*
1287          * Unconstrained instant-fit allocation from the segment list.
1288          */
1289         mutex_enter(&vmp->vm_lock);
1290 
1291         if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1292                 if (ISP2(size))
1293                         flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1294                 else if ((hb = highbit(size)) < VMEM_FREELISTS)
1295                         flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1296         }
1297 
1298         if (flist-- == 0) {
1299                 mutex_exit(&vmp->vm_lock);
1300                 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1301                     0, 0, NULL, NULL, vmflag));
1302         }
1303 
1304         ASSERT(size <= (1UL << flist));
1305         vsp = vmp->vm_freelist[flist].vs_knext;
1306         addr = vsp->vs_start;
1307         if (vmflag & VM_ENDALLOC) {
1308                 addr += vsp->vs_end - (addr + size);
1309         }
1310         (void) vmem_seg_alloc(vmp, vsp, addr, size);
1311         mutex_exit(&vmp->vm_lock);
1312         return ((void *)addr);
1313 }
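
/*
 * For illustration (arena name hypothetical):
 *
 *        A VM_SLEEP allocation is guaranteed to succeed, but may block until
 *        space and segment structures become available:
 *
 *                void *p = vmem_alloc(foo_arena, 13, VM_SLEEP);
 *                ...
 *                vmem_free(foo_arena, p, 13);
 *
 *        A VM_NOSLEEP allocation never blocks and must be checked for
 *        failure; policy flags such as VM_BESTFIT may be or'ed in:
 *
 *                p = vmem_alloc(foo_arena, 8192, VM_NOSLEEP | VM_BESTFIT);
 *                if (p == NULL)
 *                        return (ENOMEM);
 *
 * The size passed to vmem_free() must be the size originally allocated.
 */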
1314 
1315 /*
1316  * Free the segment [vaddr, vaddr + size).
1317  */
1318 void
1319 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1320 {
1321         if (size - 1 < vmp->vm_qcache_max)
1322                 kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1323                     vaddr);
1324         else
1325                 vmem_xfree(vmp, vaddr, size);
1326 }
1327 
1328 /*
1329  * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1330  */
1331 int
1332 vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1333 {
1334         uintptr_t start = (uintptr_t)vaddr;
1335         uintptr_t end = start + size;
1336         vmem_seg_t *vsp;
1337         vmem_seg_t *seg0 = &vmp->vm_seg0;
1338 
1339         mutex_enter(&vmp->vm_lock);
1340         vmp->vm_kstat.vk_contains.value.ui64++;
1341         for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1342                 vmp->vm_kstat.vk_contains_search.value.ui64++;
1343                 ASSERT(vsp->vs_type == VMEM_SPAN);
1344                 if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1345                         break;
1346         }
1347         mutex_exit(&vmp->vm_lock);
1348         return (vsp != seg0);
1349 }
1350 
1351 /*
1352  * Add the span [vaddr, vaddr + size) to arena vmp.
1353  */
1354 void *
1355 vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1356 {
1357         if (vaddr == NULL || size == 0)
1358                 panic("vmem_add(%p, %p, %lu): bad arguments",
1359                     (void *)vmp, vaddr, size);
1360 
1361         ASSERT(!vmem_contains(vmp, vaddr, size));
1362 
1363         mutex_enter(&vmp->vm_lock);
1364         if (vmem_populate(vmp, vmflag))
1365                 (void) vmem_span_create(vmp, vaddr, size, 0);
1366         else
1367                 vaddr = NULL;
1368         mutex_exit(&vmp->vm_lock);
1369         return (vaddr);
1370 }
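
/*
 * For illustration, a caller could grow an arena by donating an additional
 * range of integers (the arena name and addresses below are hypothetical):
 *
 *        if (vmem_add(foo_arena, (void *)0x10000, 0x8000, VM_NOSLEEP) == NULL)
 *                ... segment structures could not be allocated for the span ...
 *
 * A VM_SLEEP caller is not expected to see a NULL return, since the segment
 * structures are themselves VM_SLEEP allocations.  Note that vmem_add()
 * panics on a NULL vaddr or zero size, and asserts that the new span does
 * not overlap anything already in the arena.
 */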
1371 
1372 /*
1373  * Walk the vmp arena, applying func to each segment matching typemask.
1374  * If VMEM_REENTRANT is specified, the arena lock is dropped across each
1375  * call to func(); otherwise, it is held for the duration of vmem_walk()
1376  * to ensure a consistent snapshot.  Note that VMEM_REENTRANT callbacks
1377  * are *not* guaranteed a consistent view of the arena, so they should only
1378  * be used when an approximate (hint-quality) answer is adequate.
1379  */
1380 void
1381 vmem_walk(vmem_t *vmp, int typemask,
1382     void (*func)(void *, void *, size_t), void *arg)
1383 {
1384         vmem_seg_t *vsp;
1385         vmem_seg_t *seg0 = &vmp->vm_seg0;
1386         vmem_seg_t walker;
1387 
1388         if (typemask & VMEM_WALKER)
1389                 return;
1390 
1391         bzero(&walker, sizeof (walker));
1392         walker.vs_type = VMEM_WALKER;
1393 
1394         mutex_enter(&vmp->vm_lock);
1395         VMEM_INSERT(seg0, &walker, a);
1396         for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1397                 if (vsp->vs_type & typemask) {
1398                         void *start = (void *)vsp->vs_start;
1399                         size_t size = VS_SIZE(vsp);
1400                         if (typemask & VMEM_REENTRANT) {
1401                                 vmem_advance(vmp, &walker, vsp);
1402                                 mutex_exit(&vmp->vm_lock);
1403                                 func(arg, start, size);
1404                                 mutex_enter(&vmp->vm_lock);
1405                                 vsp = &walker;
1406                         } else {
1407                                 func(arg, start, size);
1408                         }
1409                 }
1410         }
1411         vmem_advance(vmp, &walker, NULL);
1412         mutex_exit(&vmp->vm_lock);
1413 }
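
/*
 * For illustration, walking an arena to total its allocated bytes might look
 * like this (the callback and arena names are hypothetical):
 *
 *        static void
 *        count_alloc(void *arg, void *start, size_t size)
 *        {
 *                *(size_t *)arg += size;
 *        }
 *
 *        size_t inuse = 0;
 *        vmem_walk(foo_arena, VMEM_ALLOC, count_alloc, &inuse);
 *
 * Since VMEM_REENTRANT is not passed, the walk holds the arena lock and the
 * total reflects a single consistent snapshot; the callback therefore must
 * not call back into the arena.  Or'ing in VMEM_REENTRANT would permit that,
 * at the cost of only an approximate result.
 */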
1414 
1415 /*
1416  * Return the total amount of memory whose type matches typemask.  Thus:
1417  *
1418  *      typemask VMEM_ALLOC yields total memory allocated (in use).
1419  *      typemask VMEM_FREE yields total memory free (available).
1420  *      typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
1421  */
1422 size_t
1423 vmem_size(vmem_t *vmp, int typemask)
1424 {
1425         uint64_t size = 0;
1426 
1427         if (typemask & VMEM_ALLOC)
1428                 size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
1429         if (typemask & VMEM_FREE)
1430                 size += vmp->vm_kstat.vk_mem_total.value.ui64 -
1431                     vmp->vm_kstat.vk_mem_inuse.value.ui64;
1432         return ((size_t)size);
1433 }
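
/*
 * For example, a caller could derive an arena's utilization from two
 * vmem_size() queries (arena name hypothetical):
 *
 *        size_t inuse = vmem_size(foo_arena, VMEM_ALLOC);
 *        size_t total = vmem_size(foo_arena, VMEM_ALLOC | VMEM_FREE);
 *        uint_t pct = (total == 0) ? 0 : (uint_t)((inuse * 100) / total);
 */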
1434 
1435 /*
1436  * Create an arena called name whose initial span is [base, base + size).
1437  * The arena's natural unit of currency is quantum, so vmem_alloc()
1438  * guarantees quantum-aligned results.  The arena may import new spans
1439  * by invoking afunc() on source, and may return those spans by invoking
1440  * ffunc() on source.  To make small allocations fast and scalable,
1441  * the arena offers high-performance caching for each integer multiple
1442  * of quantum up to qcache_max.
1443  */
1444 static vmem_t *
1445 vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
1446     void *(*afunc)(vmem_t *, size_t, int),
1447     void (*ffunc)(vmem_t *, void *, size_t),
1448     vmem_t *source, size_t qcache_max, int vmflag)
1449 {
1450         int i;
1451         size_t nqcache;
1452         vmem_t *vmp, *cur, **vmpp;
1453         vmem_seg_t *vsp;
1454         vmem_freelist_t *vfp;
1455         uint32_t id = atomic_inc_32_nv(&vmem_id);
1456 
1457         if (vmem_vmem_arena != NULL) {
1458                 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1459                     vmflag & VM_KMFLAGS);
1460         } else {
1461                 ASSERT(id <= VMEM_INITIAL);
1462                 vmp = &vmem0[id - 1];
1463         }
1464 
1465         /* An identifier arena must inherit from another identifier arena */
1466         ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
1467             (vmflag & VMC_IDENTIFIER)));
1468 
1469         if (vmp == NULL)
1470                 return (NULL);
1471         bzero(vmp, sizeof (vmem_t));
1472 
1473         (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1474         mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
1475         cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
1476         vmp->vm_cflags = vmflag;
1477         vmflag &= VM_KMFLAGS;
1478 
1479         vmp->vm_quantum = quantum;
1480         vmp->vm_qshift = highbit(quantum) - 1;
1481         nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1482 
1483         for (i = 0; i <= VMEM_FREELISTS; i++) {
1484                 vfp = &vmp->vm_freelist[i];
1485                 vfp->vs_end = 1UL << i;
1486                 vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
1487                 vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
1488         }
1489 
1490         vmp->vm_freelist[0].vs_kprev = NULL;
1491         vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1492         vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1493         vmp->vm_hash_table = vmp->vm_hash0;
1494         vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1495         vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1496 
1497         vsp = &vmp->vm_seg0;
1498         vsp->vs_anext = vsp;
1499         vsp->vs_aprev = vsp;
1500         vsp->vs_knext = vsp;
1501         vsp->vs_kprev = vsp;
1502         vsp->vs_type = VMEM_SPAN;
1503 
1504         vsp = &vmp->vm_rotor;
1505         vsp->vs_type = VMEM_ROTOR;
1506         VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1507 
1508         bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));
1509 
1510         vmp->vm_id = id;
1511         if (source != NULL)
1512                 vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
1513         vmp->vm_source = source;
1514         vmp->vm_source_alloc = afunc;
1515         vmp->vm_source_free = ffunc;
1516 
1517         /*
1518          * Some arenas (like vmem_metadata and kmem_metadata) cannot
1519          * use quantum caching to lower fragmentation.  Instead, we
1520          * increase their minimum import size, which has a similar effect.
1521          */
1522         if (vmp->vm_cflags & VMC_NO_QCACHE) {
1523                 vmp->vm_min_import =
1524                     VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
1525                 nqcache = 0;
1526         }
1527 
1528         if (nqcache != 0) {
1529                 ASSERT(!(vmflag & VM_NOSLEEP));
1530                 vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1531                 for (i = 0; i < nqcache; i++) {
1532                         char buf[VMEM_NAMELEN + 21];
1533                         (void) sprintf(buf, "%s_%lu", vmp->vm_name,
1534                             (i + 1) * quantum);
1535                         vmp->vm_qcache[i] = kmem_cache_create(buf,
1536                             (i + 1) * quantum, quantum, NULL, NULL, NULL,
1537                             NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
1538                 }
1539         }
1540 
1541         if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
1542             "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
1543             sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
1544                 vmp->vm_ksp->ks_data = &vmp->vm_kstat;
1545                 kstat_install(vmp->vm_ksp);
1546         }
1547 
1548         mutex_enter(&vmem_list_lock);
1549         vmpp = &vmem_list;
1550         while ((cur = *vmpp) != NULL)
1551                 vmpp = &cur->vm_next;
1552         *vmpp = vmp;
1553         mutex_exit(&vmem_list_lock);
1554 
1555         if (vmp->vm_cflags & VMC_POPULATOR) {
1556                 ASSERT(vmem_populators < VMEM_INITIAL);
1557                 vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
1558                 mutex_enter(&vmp->vm_lock);
1559                 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1560                 mutex_exit(&vmp->vm_lock);
1561         }
1562 
1563         if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1564                 vmem_destroy(vmp);
1565                 return (NULL);
1566         }
1567 
1568         return (vmp);
1569 }
1570 
1571 vmem_t *
1572 vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
1573     vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1574     size_t qcache_max, int vmflag)
1575 {
1576         ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
1577         vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);
1578 
1579         return (vmem_create_common(name, base, size, quantum,
1580             (vmem_alloc_t *)afunc, ffunc, source, qcache_max,
1581             vmflag | VMC_XALLOC));
1582 }
1583 
1584 vmem_t *
1585 vmem_create(const char *name, void *base, size_t size, size_t quantum,
1586     vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1587     size_t qcache_max, int vmflag)
1588 {
1589         ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
1590         vmflag &= ~(VMC_XALLOC | VMC_XALIGN);
1591 
1592         return (vmem_create_common(name, base, size, quantum,
1593             afunc, ffunc, source, qcache_max, vmflag));
1594 }
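
/*
 * For illustration, two typical vmem_create() calls (the names, sizes and
 * parent arena are hypothetical):
 *
 *        A standalone identifier arena over the integer range [1, 1000):
 *
 *                id_arena = vmem_create("example_ids", (void *)1, 999, 1,
 *                    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
 *
 *        A child arena that imports page-sized spans from a parent arena on
 *        demand and quantum-caches allocations up to four pages:
 *
 *                sub_arena = vmem_create("example_sub", NULL, 0, PAGESIZE,
 *                    vmem_alloc, vmem_free, parent_arena, 4 * PAGESIZE,
 *                    VM_SLEEP);
 */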
1595 
1596 /*
1597  * Destroy arena vmp.
1598  */
1599 void
1600 vmem_destroy(vmem_t *vmp)
1601 {
1602         vmem_t *cur, **vmpp;
1603         vmem_seg_t *seg0 = &vmp->vm_seg0;
1604         vmem_seg_t *vsp, *anext;
1605         size_t leaked;
1606         int i;
1607 
1608         mutex_enter(&vmem_list_lock);
1609         vmpp = &vmem_list;
1610         while ((cur = *vmpp) != vmp)
1611                 vmpp = &cur->vm_next;
1612         *vmpp = vmp->vm_next;
1613         mutex_exit(&vmem_list_lock);
1614 
1615         for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1616                 if (vmp->vm_qcache[i])
1617                         kmem_cache_destroy(vmp->vm_qcache[i]);
1618 
1619         leaked = vmem_size(vmp, VMEM_ALLOC);
1620         if (leaked != 0)
1621                 cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
1622                     vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
1623                     "identifiers" : "bytes");
1624 
1625         if (vmp->vm_hash_table != vmp->vm_hash0)
1626                 vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1627                     (vmp->vm_hash_mask + 1) * sizeof (void *));
1628 
1629         /*
1630          * Give back the segment structures for anything that's left in the
1631          * arena, e.g. the primary spans and their free segments.
1632          */
1633         VMEM_DELETE(&vmp->vm_rotor, a);
1634         for (vsp = seg0->vs_anext; vsp != seg0; vsp = anext) {
1635                 anext = vsp->vs_anext;
1636                 vmem_putseg_global(vsp);
1637         }
1638 
1639         while (vmp->vm_nsegfree > 0)
1640                 vmem_putseg_global(vmem_getseg(vmp));
1641 
1642         kstat_delete(vmp->vm_ksp);
1643 
1644         mutex_destroy(&vmp->vm_lock);
1645         cv_destroy(&vmp->vm_cv);
1646         vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1647 }
1648 
1649 /*
1650  * Only shrink the vmem hash table if it is at least
1651  * (1 << vmem_rescale_minshift) times larger than necessary
1652  * (8x with the default minshift of 3).
1652  */
1653 int vmem_rescale_minshift = 3;
1654 
1655 /*
1656  * Resize vmp's hash table to keep the average lookup depth near 1.0.
1657  */
1658 static void
1659 vmem_hash_rescale(vmem_t *vmp)
1660 {
1661         vmem_seg_t **old_table, **new_table, *vsp;
1662         size_t old_size, new_size, h, nseg;
1663 
1664         nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
1665             vmp->vm_kstat.vk_free.value.ui64);
1666 
1667         new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
1668         old_size = vmp->vm_hash_mask + 1;
1669 
1670         if ((old_size >> vmem_rescale_minshift) <= new_size &&
1671             new_size <= (old_size << 1))
1672                 return;
1673 
1674         new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
1675             VM_NOSLEEP);
1676         if (new_table == NULL)
1677                 return;
1678         bzero(new_table, new_size * sizeof (void *));
1679 
1680         mutex_enter(&vmp->vm_lock);
1681 
1682         old_size = vmp->vm_hash_mask + 1;
1683         old_table = vmp->vm_hash_table;
1684 
1685         vmp->vm_hash_mask = new_size - 1;
1686         vmp->vm_hash_table = new_table;
1687         vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1688 
1689         for (h = 0; h < old_size; h++) {
1690                 vsp = old_table[h];
1691                 while (vsp != NULL) {
1692                         uintptr_t addr = vsp->vs_start;
1693                         vmem_seg_t *next_vsp = vsp->vs_knext;
1694                         vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1695                         vsp->vs_knext = *hash_bucket;
1696                         *hash_bucket = vsp;
1697                         vsp = next_vsp;
1698                 }
1699         }
1700 
1701         mutex_exit(&vmp->vm_lock);
1702 
1703         if (old_table != vmp->vm_hash0)
1704                 vmem_free(vmem_hash_arena, old_table,
1705                     old_size * sizeof (void *));
1706 }
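
/*
 * To make the sizing rule above concrete: with nseg = 1000 live segments,
 * 3 * nseg + 4 = 3004 and highbit(3004) = 12, so the target size is
 *
 *        new_size = MAX(VMEM_HASH_INITIAL, 1 << (12 - 2)) = 1024 buckets
 *
 * (assuming VMEM_HASH_INITIAL is no larger than 1024), i.e. roughly one
 * segment per bucket.  The table is actually reallocated only when new_size
 * exceeds twice the current size or drops below one-eighth of it (with the
 * default vmem_rescale_minshift of 3).
 */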
1707 
1708 /*
1709  * Perform periodic maintenance on all vmem arenas.
1710  */
1711 void
1712 vmem_update(void *dummy)
1713 {
1714         vmem_t *vmp;
1715 
1716         mutex_enter(&vmem_list_lock);
1717         for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1718                 /*
1719                  * If threads are waiting for resources, wake them up
1720                  * periodically so they can issue another kmem_reap()
1721                  * to reclaim resources cached by the slab allocator.
1722                  */
1723                 cv_broadcast(&vmp->vm_cv);
1724 
1725                 /*
1726                  * Rescale the hash table to keep the hash chains short.
1727                  */
1728                 vmem_hash_rescale(vmp);
1729         }
1730         mutex_exit(&vmem_list_lock);
1731 
1732         (void) timeout(vmem_update, dummy, vmem_update_interval * hz);
1733 }
1734 
1735 void
1736 vmem_qcache_reap(vmem_t *vmp)
1737 {
1738         int i;
1739 
1740         /*
1741          * Reap any quantum caches that may be part of this vmem.
1742          */
1743         for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1744                 if (vmp->vm_qcache[i])
1745                         kmem_cache_reap_now(vmp->vm_qcache[i]);
1746 }
1747 
1748 /*
1749  * Prepare vmem for use.
1750  */
1751 vmem_t *
1752 vmem_init(const char *heap_name,
1753     void *heap_start, size_t heap_size, size_t heap_quantum,
1754     void *(*heap_alloc)(vmem_t *, size_t, int),
1755     void (*heap_free)(vmem_t *, void *, size_t))
1756 {
1757         uint32_t id;
1758         int nseg = VMEM_SEG_INITIAL;
1759         vmem_t *heap;
1760 
1761         while (--nseg >= 0)
1762                 vmem_putseg_global(&vmem_seg0[nseg]);
1763 
1764         heap = vmem_create(heap_name,
1765             heap_start, heap_size, heap_quantum,
1766             NULL, NULL, NULL, 0,
1767             VM_SLEEP | VMC_POPULATOR);
1768 
1769         vmem_metadata_arena = vmem_create("vmem_metadata",
1770             NULL, 0, heap_quantum,
1771             vmem_alloc, vmem_free, heap, 8 * heap_quantum,
1772             VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);
1773 
1774         vmem_seg_arena = vmem_create("vmem_seg",
1775             NULL, 0, heap_quantum,
1776             heap_alloc, heap_free, vmem_metadata_arena, 0,
1777             VM_SLEEP | VMC_POPULATOR);
1778 
1779         vmem_hash_arena = vmem_create("vmem_hash",
1780             NULL, 0, 8,
1781             heap_alloc, heap_free, vmem_metadata_arena, 0,
1782             VM_SLEEP);
1783 
1784         vmem_vmem_arena = vmem_create("vmem_vmem",
1785             vmem0, sizeof (vmem0), 1,
1786             heap_alloc, heap_free, vmem_metadata_arena, 0,
1787             VM_SLEEP);
1788 
1789         for (id = 0; id < vmem_id; id++)
1790                 (void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
1791                     1, 0, 0, &vmem0[id], &vmem0[id + 1],
1792                     VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
1793 
1794         return (heap);
1795 }
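
/*
 * For illustration, the platform's heap setup code is expected to call
 * vmem_init() once at boot, along the lines of the sketch below; the
 * argument values and the segkmem backing functions shown are illustrative:
 *
 *        heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
 *            segkmem_alloc, segkmem_free);
 *
 * The returned heap arena is the root of the arena tree; vmem_metadata,
 * vmem_seg, vmem_hash and vmem_vmem then import from it, so the allocator's
 * own metadata is carved out of the same resource it manages.
 */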