/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2018, Joyent, Inc.
 */

/*
 * Kernel memory allocator, as described in the following two papers and a
 * statement about the consolidator:
 *
 * Jeff Bonwick,
 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 * Proceedings of the Summer 1994 Usenix Conference.
 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 * Jeff Bonwick and Jonathan Adams,
 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 * Arbitrary Resources.
 * Proceedings of the 2001 Usenix Conference.
 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * kmem Slab Consolidator Big Theory Statement:
 *
 * 1. Motivation
 *
 * As stated in Bonwick94, slabs provide the following advantages over other
 * allocation structures in terms of memory fragmentation:
 *
 *  - Internal fragmentation (per-buffer wasted space) is minimal.
 *  - Severe external fragmentation (unused buffers on the free list) is
 *    unlikely.
 *
 * Segregating objects by size eliminates one source of external fragmentation,
 * and according to Bonwick:
 *
 *   The other reason that slabs reduce external fragmentation is that all
 *   objects in a slab are of the same type, so they have the same lifetime
 *   distribution. The resulting segregation of short-lived and long-lived
 *   objects at slab granularity reduces the likelihood of an entire page being
 *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
 *
 * While unlikely, severe external fragmentation remains possible. Clients that
 * allocate both short- and long-lived objects from the same cache cannot
 * anticipate the distribution of long-lived objects within the allocator's slab
 * implementation. Even a small percentage of long-lived objects distributed
 * randomly across many slabs can lead to a worst case scenario where the client
 * frees the majority of its objects and the system gets back almost none of the
 * slabs. Despite the client doing what it reasonably can to help the system
 * reclaim memory, the allocator cannot shake free enough slabs because of
 * lonely allocations stubbornly hanging on. Although the allocator is in a
 * position to diagnose the fragmentation, there is nothing that the allocator
 * by itself can do about it. It only takes a single allocated object to prevent
 * an entire slab from being reclaimed, and any object handed out by
 * kmem_cache_alloc() is by definition in the client's control. Conversely,
 * although the client is in a position to move a long-lived object, it has no
 * way of knowing if the object is causing fragmentation, and if so, where to
 * move it. A solution necessarily requires further cooperation between the
 * allocator and the client.
 *
 * 2. Move Callback
 *
 * The kmem slab consolidator therefore adds a move callback to the
 * allocator/client interface, improving worst-case external fragmentation in
 * kmem caches that supply a function to move objects from one memory location
 * to another. In a situation of low memory, kmem attempts to consolidate all of
 * a cache's slabs at once; otherwise it works slowly to bring external
 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
 * thereby helping to avoid a low memory situation in the future.
 *
 * The callback has the following signature:
 *
 *   kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
 *
 * It supplies the kmem client with two addresses: the allocated object that
 * kmem wants to move and a buffer selected by kmem for the client to use as the
 * copy destination. The callback is kmem's way of saying "Please get off of
 * this buffer and use this one instead." kmem knows where it wants to move the
 * object in order to best reduce fragmentation. All the client needs to know
 * about the second argument (void *new) is that it is an allocated, constructed
 * object ready to take the contents of the old object. When the move function
 * is called, the system is likely to be low on memory, and the new object
 * spares the client from having to worry about allocating memory for the
 * requested move. The third argument supplies the size of the object, in case a
 * single move function handles multiple caches whose objects differ only in
 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
 * user argument passed to the constructor, destructor, and reclaim functions is
 * also passed to the move callback.
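 *
 * As an illustration of a single move function shared across caches of
 * different sizes, consider the following hedged sketch; the helper
 * functions are hypothetical client code, not part of the kmem API, and the
 * return values are described in section 2.2 below:
 *
 *      static kmem_cbrc_t
 *      shared_buf_move(void *old, void *new, size_t size, void *user_arg)
 *      {
 *              if (!shared_buf_known(old))     // hypothetical client check
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              bcopy(old, new, size);          // size disambiguates caches
 *              shared_buf_switch(old, new);    // hypothetical bookkeeping
 *              return (KMEM_CBRC_YES);
 *      }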
 *
 * 2.1 Setting the Move Callback
 *
 * The client sets the move callback after creating the cache and before
 * allocating from it:
 *
 *      object_cache = kmem_cache_create(...);
 *      kmem_cache_set_move(object_cache, object_move);
 *
 * 2.2 Move Callback Return Values
 *
 * Only the client knows its own data and when it is a good time to move it.
 * The client is cooperating with kmem to return unused memory to the system,
 * and kmem respectfully accepts this help at the client's convenience. When
 * asked to move an object, the client can respond with any of the following:
 *
 *   typedef enum kmem_cbrc {
 *           KMEM_CBRC_YES,
 *           KMEM_CBRC_NO,
 *           KMEM_CBRC_LATER,
 *           KMEM_CBRC_DONT_NEED,
 *           KMEM_CBRC_DONT_KNOW
 *   } kmem_cbrc_t;
 *
 * The client must not explicitly kmem_cache_free() either of the objects passed
 * to the callback, since kmem wants to free them directly to the slab layer
 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
 * objects to free:
 *
 *       YES: (Did it) The client moved the object, so kmem frees the old one.
 *        NO: (Never) The client refused, so kmem frees the new object (the
 *            unused copy destination). kmem also marks the slab of the old
 *            object so as not to bother the client with further callbacks for
 *            that object as long as the slab remains on the partial slab list.
 *            (The system won't be getting the slab back as long as the
 *            immovable object holds it hostage, so there's no point in moving
 *            any of its objects.)
 *     LATER: The client is using the object and cannot move it now, so kmem
 *            frees the new object (the unused copy destination). kmem still
 *            attempts to move other objects off the slab, since it expects to
 *            succeed in clearing the slab in a later callback. The client
 *            should use LATER instead of NO if the object is likely to become
 *            movable very soon.
 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
 *            with the new object (the unused copy destination). This response
 *            is the client's opportunity to be a model citizen and give back as
 *            much as it can.
 * DONT_KNOW: The client does not know about the object because
 *            a) the client has just allocated the object and not yet put it
 *               wherever it expects to find known objects,
 *            b) the client has removed the object from wherever it expects to
 *               find known objects and is about to free it, or
 *            c) the client has freed the object.
 *            In all these cases (a, b, and c) kmem frees the new object (the
 *            unused copy destination).  In the first case, the object is in
 *            use and the correct action is that for LATER; in the latter two
 *            cases, we know that the object is either freed or about to be
 *            freed, in which case it is either already in a magazine or about
 *            to be in one.  In these cases, we know that the object will either
 *            be reallocated and reused, or it will end up in a full magazine
 *            that will be reaped (thereby liberating the slab).  Because it
 *            is prohibitively expensive to differentiate these cases, and
 *            because the defrag code is executed when we're low on memory
 *            (thereby biasing the system to reclaim full magazines) we treat
 *            all DONT_KNOW cases as LATER and rely on cache reaping to
 *            generally clean up full magazines.  While we take the same action
 *            for these cases, we maintain their semantic distinction:  if
 *            defragmentation is not occurring, it is useful to know if this
 *            is due to objects in use (LATER) or objects in an unknown state
 *            of transition (DONT_KNOW).
 *
 * 2.3 Object States
 *
 * Neither kmem nor the client can be assumed to know the object's whereabouts
 * at the time of the callback. An object belonging to a kmem cache may be in
 * any of the following states:
 *
 * 1. Uninitialized on the slab
 * 2. Allocated from the slab but not constructed (still uninitialized)
 * 3. Allocated from the slab, constructed, but not yet ready for business
 *    (not in a valid state for the move callback)
 * 4. In use (valid and known to the client)
 * 5. About to be freed (no longer in a valid state for the move callback)
 * 6. Freed to a magazine (still constructed)
 * 7. Allocated from a magazine, not yet ready for business (not in a valid
 *    state for the move callback), and about to return to state #4
 * 8. Deconstructed on a magazine that is about to be freed
 * 9. Freed to the slab
 *
 * Since the move callback may be called at any time while the object is in any
 * of the above states (except state #1), the client needs a safe way to
 * determine whether or not it knows about the object. Specifically, the client
 * needs to know whether or not the object is in state #4, the only state in
 * which a move is valid. If the object is in any other state, the client should
 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
 * the object's fields.
 *
 * Note that although an object may be in state #4 when kmem initiates the move
 * request, the object may no longer be in that state by the time kmem actually
 * calls the move function. Not only does the client free objects
 * asynchronously, kmem itself puts move requests on a queue where they are
 * pending until kmem processes them from another context. Also, objects freed
 * to a magazine appear allocated from the point of view of the slab layer, so
 * kmem may even initiate requests for objects in a state other than state #4.
 *
 * 2.3.1 Magazine Layer
 *
 * An important insight revealed by the states listed above is that the magazine
 * layer is populated only by kmem_cache_free(). Magazines of constructed
 * objects are never populated directly from the slab layer (which contains raw,
 * unconstructed objects). Whenever an allocation request cannot be satisfied
 * from the magazine layer, the magazines are bypassed and the request is
 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
 * the object constructor only when allocating from the slab layer, and only in
 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
 * the move callback. kmem does not preconstruct objects in anticipation of
 * kmem_cache_alloc().
 *
 * 2.3.2 Object Constructor and Destructor
 *
 * If the client supplies a destructor, it must be valid to call the destructor
 * on a newly created object (immediately after the constructor).
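 *
 * For example, the following hedged sketch (the object type and its fields
 * are hypothetical) shows a constructor/destructor pair that satisfies this
 * requirement, because the destructor tears down only state that the
 * constructor has already established:
 *
 *      static int
 *      object_construct(void *buf, void *arg, int kmflags)
 *      {
 *              object_t *op = buf;
 *
 *              mutex_init(&op->o_lock, NULL, MUTEX_DEFAULT, NULL);
 *              op->o_container = NULL;
 *              return (0);
 *      }
 *
 *      static void
 *      object_destruct(void *buf, void *arg)
 *      {
 *              object_t *op = buf;
 *
 *              // valid immediately after object_construct()
 *              mutex_destroy(&op->o_lock);
 *      }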
 *
 * 2.4 Recognizing Known Objects
 *
 * There is a simple test to determine safely whether or not the client knows
 * about a given object in the move callback. It relies on the fact that kmem
 * guarantees that the object of the move callback has only been touched by the
 * client itself or else by kmem. kmem does this by ensuring that none of the
 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
 * callback is pending. When the last object on a slab is freed, if there is a
 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
 * slabs on that list until all pending callbacks are completed. That way,
 * clients can be certain that the object of a move callback is in one of the
 * states listed above, making it possible to distinguish known objects (in
 * state #4) using the two low order bits of any pointer member (with the
 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
 * platforms).
 *
 * The test works as long as the client always transitions objects from state #4
 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
 * order bit of the client-designated pointer member. Since kmem only writes
 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
 * guaranteed to set at least one of the two low order bits. Therefore, given an
 * object with a back pointer to a 'container_t *o_container', the client can
 * test
 *
 *      container_t *container = object->o_container;
 *      if ((uintptr_t)container & 0x3) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * Typically, an object will have a pointer to some structure with a list or
 * hash where objects from the cache are kept while in use. Assuming that the
 * client has some way of knowing that the container structure is valid and will
 * not go away during the move, and assuming that the structure includes a lock
 * to protect whatever collection is used, then the client would continue as
 * follows:
 *
 *      // Ensure that the container structure does not go away.
 *      if (container_hold(container) == 0) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *      mutex_enter(&container->c_objects_lock);
 *      if (container != object->o_container) {
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * At this point the client knows that the object cannot be freed as long as
 * c_objects_lock is held. Note that after acquiring the lock, the client must
 * recheck the o_container pointer in case the object was removed just before
 * acquiring the lock.
 *
 * When the client is about to free an object, it must first remove that object
 * from the list, hash, or other structure where it is kept. At that time, to
 * mark the object so it can be distinguished from the remaining, known objects,
 * the client sets the designated low order bit:
 *
 *      mutex_enter(&container->c_objects_lock);
 *      object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
 *      list_remove(&container->c_objects, object);
 *      mutex_exit(&container->c_objects_lock);
 *
 * In the common case, the object is freed to the magazine layer, where it may
 * be reused on a subsequent allocation without the overhead of calling the
 * constructor. While in the magazine it appears allocated from the point of
 * view of the slab layer, making it a candidate for the move callback. Most
 * objects unrecognized by the client in the move callback fall into this
 * category and are cheaply distinguished from known objects by the test
 * described earlier. Because searching magazines is prohibitively expensive
 * for kmem, clients that do not mark freed objects (and therefore return
 * KMEM_CBRC_DONT_KNOW for large numbers of objects) may find defragmentation
 * efficacy reduced.
 *
 * Invalidating the designated pointer member before freeing the object marks
 * the object to be avoided in the callback, and conversely, assigning a valid
 * value to the designated pointer member after allocating the object makes the
 * object fair game for the callback:
 *
 *      ... allocate object ...
 *      ... set any initial state not set by the constructor ...
 *
 *      mutex_enter(&container->c_objects_lock);
 *      list_insert_tail(&container->c_objects, object);
 *      membar_producer();
 *      object->o_container = container;
 *      mutex_exit(&container->c_objects_lock);
 *
 * Note that everything else must be valid before setting o_container, since
 * that assignment is what makes the object fair game for the move callback.
 * The membar_producer() call ensures
 * that all the object's state is written to memory before setting the pointer
 * that transitions the object from state #3 or #7 (allocated, constructed, not
 * yet in use) to state #4 (in use, valid). That's important because the move
 * function has to check the validity of the pointer before it can safely
 * acquire the lock protecting the collection where it expects to find known
 * objects.
 *
 * This method of distinguishing known objects observes the usual symmetry:
 * invalidating the designated pointer is the first thing the client does before
 * freeing the object, and setting the designated pointer is the last thing the
 * client does after allocating the object. Of course, the client is not
 * required to use this method. Fundamentally, how the client recognizes known
 * objects is completely up to the client, but this method is recommended as an
 * efficient and safe way to take advantage of the guarantees made by kmem. If
 * the entire object is arbitrary data without any markable bits from a suitable
 * pointer member, then the client must find some other method, such as
 * searching a hash table of known objects.
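 *
 * A hedged sketch of that alternative follows; the hash table and its lookup
 * function are hypothetical client code, not part of the kmem API:
 *
 *      mutex_enter(&known_objects_lock);
 *      if (!known_object_lookup(&known_objects_hash, object)) {
 *              mutex_exit(&known_objects_lock);
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *      ... the object is known; proceed as described above ...
 *      mutex_exit(&known_objects_lock);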
 *
 * 2.5 Preventing Objects From Moving
 *
 * Besides a way to distinguish known objects, the other thing that the client
 * needs is a strategy to ensure that an object will not move while the client
 * is actively using it. The details of satisfying this requirement tend to be
 * highly cache-specific. It might seem that the same rules that let a client
 * remove an object safely should also decide when an object can be moved
 * safely. However, any object state that makes a removal attempt invalid is
 * likely to be long-lasting for objects that the client does not expect to
 * remove. kmem knows nothing about the object state and is equally likely (from
 * the client's point of view) to request a move for any object in the cache,
 * whether prepared for removal or not. Even a low percentage of objects stuck
 * in place by unremovability will defeat the consolidator if the stuck objects
 * are the same long-lived allocations likely to hold slabs hostage.
 * Fundamentally, the consolidator is not aimed at common cases. Severe external
 * fragmentation is a worst case scenario manifested as sparsely allocated
 * slabs, by definition a low percentage of the cache's objects. When deciding
 * what makes an object movable, keep in mind the goal of the consolidator: to
 * bring worst-case external fragmentation within the limits guaranteed for
 * internal fragmentation. Removability is a poor criterion if it is likely to
 * exclude more than an insignificant percentage of objects for long periods of
 * time.
 *
 * A tricky general solution exists, and it has the advantage of letting you
 * move any object at almost any moment, practically eliminating the likelihood
 * that an object can hold a slab hostage. However, if there is a cache-specific
 * way to ensure that an object is not actively in use in the vast majority of
 * cases, a simpler solution that leverages this cache-specific knowledge is
 * preferred.
 *
 * 2.5.1 Cache-Specific Solution
 *
 * As an example of a cache-specific solution, the ZFS znode cache takes
 * advantage of the fact that the vast majority of znodes are only being
 * referenced from the DNLC. (A typical case might be a few hundred in active
 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
 * client has established that it recognizes the znode and can access its fields
 * safely (using the method described earlier), it then tests whether the znode
 * is referenced by anything other than the DNLC. If so, it assumes that the
 * znode may be in active use and is unsafe to move, so it drops its locks and
 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
 * else znodes are used, no change is needed to protect against the possibility
 * of the znode moving. The disadvantage is that it remains possible for an
 * application to hold a znode slab hostage with an open file descriptor.
 * However, this case ought to be rare and the consolidator has a way to deal
 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
 * object, kmem eventually stops believing it and treats the slab as if the
 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
 * then focus on getting it off of the partial slab list by allocating rather
 * than freeing all of its objects. (Either way of getting a slab off the
 * free list reduces fragmentation.)
 *
 * 2.5.2 General Solution
 *
 * The general solution, on the other hand, requires an explicit hold everywhere
 * the object is used to prevent it from moving. To keep the client locking
 * strategy as uncomplicated as possible, kmem guarantees the simplifying
 * assumption that move callbacks are sequential, even across multiple caches.
 * Internally, a global queue processed by a single thread supports all caches
 * implementing the callback function. No matter how many caches supply a move
 * function, the consolidator never moves more than one object at a time, so the
 * client does not have to worry about tricky lock ordering involving several
 * related objects from different kmem caches.
 *
 * The general solution implements the explicit hold as a read-write lock, which
 * allows multiple readers to access an object from the cache simultaneously
 * while a single writer is excluded from moving it. A single rwlock for the
 * entire cache would lock out all threads from using any of the cache's objects
 * even though only a single object is being moved, so to reduce contention,
 * the client can fan out the single rwlock into an array of rwlocks hashed by
 * the object address, making it probable that moving one object will not
 * prevent other threads from using a different object. The rwlock cannot be a
 * member of the object itself, because the possibility of the object moving
 * makes it unsafe to access any of the object's fields until the lock is
 * acquired.
 *
 * Assuming a small, fixed number of locks, it's possible that multiple objects
 * will hash to the same lock. A thread that needs to use multiple objects in
 * the same function may acquire the same lock multiple times. Since rwlocks are
 * reentrant for readers, and since there is never more than a single writer at
 * a time (assuming that the client acquires the lock as a writer only when
 * moving an object inside the callback), there would seem to be no problem.
 * However, a client locking multiple objects in the same function must handle
 * one case of potential deadlock: Assume that thread A needs to prevent both
 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
 * same lock, that thread A will acquire the lock for object 1 as a reader
 * before thread B sets the lock's write-wanted bit, preventing thread A from
 * reacquiring the lock for object 2 as a reader. Unable to make forward
 * progress, thread A will never release the lock for object 1, resulting in
 * deadlock.
 *
 * There are two ways of avoiding the deadlock just described. The first is to
 * use rw_tryenter() rather than rw_enter() in the callback function when
 * attempting to acquire the lock as a writer. If tryenter discovers that the
 * same object (or another object hashed to the same lock) is already in use, it
 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
 * since it allows a thread to acquire the lock as a reader in spite of a
 * waiting writer. This second approach insists on moving the object now, no
 * matter how many readers the move function must wait for in order to do so,
 * and could delay the completion of the callback indefinitely (blocking
 * callbacks to other clients). In practice, a less insistent callback using
 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
 * little reason to use anything else.
 *
 * Avoiding deadlock is not the only problem that an implementation using an
 * explicit hold needs to solve. Locking the object in the first place (to
 * prevent it from moving) remains a problem, since the object could move
 * between the time you obtain a pointer to the object and the time you acquire
 * the rwlock hashed to that pointer value. Therefore the client needs to
 * recheck the value of the pointer after acquiring the lock, drop the lock if
 * the value has changed, and try again. This requires a level of indirection:
 * something that points to the object rather than the object itself, that the
 * client can access safely while attempting to acquire the lock. (The object
 * itself cannot be referenced safely because it can move at any time.)
 * The following lock-acquisition function takes whatever is safe to reference
 * (arg), follows its pointer to the object (using function f), and tries as
 * often as necessary to acquire the hashed lock and verify that the object
 * still has not moved:
 *
 *      object_t *
 *      object_hold(object_f f, void *arg)
 *      {
 *              object_t *op;
 *
 *              op = f(arg);
 *              if (op == NULL) {
 *                      return (NULL);
 *              }
 *
 *              rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              while (op != f(arg)) {
 *                      rw_exit(OBJECT_RWLOCK(op));
 *                      op = f(arg);
 *                      if (op == NULL) {
 *                              break;
 *                      }
 *                      rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              }
 *
 *              return (op);
 *      }
 *
 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
 * lock reacquisition loop, while necessary, almost never executes. The function
 * pointer f (used to obtain the object pointer from arg) has the following type
 * definition:
 *
 *      typedef object_t *(*object_f)(void *arg);
 *
 * An object_f implementation is likely to be as simple as accessing a structure
 * member:
 *
 *      object_t *
 *      s_object(void *arg)
 *      {
 *              something_t *sp = arg;
 *              return (sp->s_object);
 *      }
 *
 * The flexibility of a function pointer allows the path to the object to be
 * arbitrarily complex and also supports the notion that depending on where you
 * are using the object, you may need to get it from someplace different.
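 *
 * For instance, elsewhere the same object might be reached through an extra
 * level of indirection; a hedged sketch with hypothetical types:
 *
 *      object_t *
 *      t_object(void *arg)
 *      {
 *              table_slot_t *ts = arg;
 *
 *              return (ts->ts_entry == NULL ? NULL : ts->ts_entry->e_object);
 *      }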
 *
 * The function that releases the explicit hold is simpler because it does not
 * have to worry about the object moving:
 *
 *      void
 *      object_rele(object_t *op)
 *      {
 *              rw_exit(OBJECT_RWLOCK(op));
 *      }
 *
 * The caller is spared these details so that obtaining and releasing an
 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
 * of object_hold() only needs to know that the returned object pointer is valid
 * if not NULL and that the object will not move until released.
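 *
 * A typical caller therefore looks like the following hedged sketch, reusing
 * the s_object() accessor defined above:
 *
 *      something_t *sp = ...;
 *      object_t *op;
 *
 *      if ((op = object_hold(s_object, sp)) != NULL) {
 *              ... use the object; it cannot move until released ...
 *              object_rele(op);
 *      }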
 *
 * Although object_hold() prevents an object from moving, it does not prevent it
 * from being freed. The caller must take measures before calling object_hold()
 * (afterwards is too late) to ensure that the held object cannot be freed. The
 * caller must do so without accessing the unsafe object reference, so any lock
 * or reference count used to ensure the continued existence of the object must
 * live outside the object itself.
 *
 * Obtaining a new object is a special case where an explicit hold is impossible
 * for the caller. Any function that returns a newly allocated object (either as
 * a return value, or as an in-out parameter) must return it already held; once
 * the caller has it, it is too late, since the object cannot be safely accessed
 * without the level of indirection described earlier. The following
 * object_alloc() example uses the same code shown earlier to transition a new
 * object into the state of being recognized (by the client) as a known object.
 * The function must acquire the hold (rw_enter) before that state transition
 * makes the object movable:
 *
 *      static object_t *
 *      object_alloc(container_t *container)
 *      {
 *              object_t *object = kmem_cache_alloc(object_cache, 0);
 *              ... set any initial state not set by the constructor ...
 *              rw_enter(OBJECT_RWLOCK(object), RW_READER);
 *              mutex_enter(&container->c_objects_lock);
 *              list_insert_tail(&container->c_objects, object);
 *              membar_producer();
 *              object->o_container = container;
 *              mutex_exit(&container->c_objects_lock);
 *              return (object);
 *      }
 *
 * Functions that implicitly acquire an object hold (any function that calls
 * object_alloc() to supply an object for the caller) need to be carefully noted
 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
 * prevent all objects hashed to the affected rwlocks from ever being moved.
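 *
 * For example (a hedged sketch), a caller of object_alloc() owes a matching
 * release, exactly as a caller of object_hold() does:
 *
 *      object_t *object = object_alloc(container);
 *      ... use the object ...
 *      object_rele(object);    // matches the hold taken in object_alloc()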
 *
 * The pointer to a held object can be hashed to the holding rwlock even after
 * the object has been freed. Although it is possible to release the hold
 * after freeing the object, you may decide to release the hold implicitly in
 * whatever function frees the object, so as to release the hold as soon as
 * possible, and for the sake of symmetry with the function that implicitly
 * acquires the hold when it allocates the object. Here, object_free() releases
 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
 * matching pair with object_hold():
 *
 *      void
 *      object_free(object_t *object)
 *      {
 *              container_t *container;
 *
 *              ASSERT(object_held(object));
 *              container = object->o_container;
 *              mutex_enter(&container->c_objects_lock);
 *              object->o_container =
 *                  (void *)((uintptr_t)object->o_container | 0x1);
 *              list_remove(&container->c_objects, object);
 *              mutex_exit(&container->c_objects_lock);
 *              object_rele(object);
 *              kmem_cache_free(object_cache, object);
 *      }
 *
 * Note that object_free() cannot safely accept an object pointer as an argument
 * unless the object is already held. Any function that calls object_free()
 * needs to be carefully noted since it similarly forms a matching pair with
 * object_hold().
 *
 * To complete the picture, the following callback function implements the
 * general solution by moving objects only if they are currently unheld:
 *
 *      static kmem_cbrc_t
 *      object_move(void *buf, void *newbuf, size_t size, void *arg)
 *      {
 *              object_t *op = buf, *np = newbuf;
 *              container_t *container;
 *
 *              container = op->o_container;
 *              if ((uintptr_t)container & 0x3) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              // Ensure that the container structure does not go away.
 *              if (container_hold(container) == 0) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              mutex_enter(&container->c_objects_lock);
 *              if (container != op->o_container) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_LATER);
 *              }
 *
 *              object_move_impl(op, np); // critical section
 *              rw_exit(OBJECT_RWLOCK(op));
 *
 *              op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
 *              list_link_replace(&op->o_link_node, &np->o_link_node);
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_YES);
 *      }
 *
 * Note that object_move() must invalidate the designated o_container pointer of
 * the old object in the same way that object_free() does, since kmem will free
 * the object in response to the KMEM_CBRC_YES return value.
 *
 * The lock order in object_move() differs from object_alloc(), which locks
 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
 * not a problem. Holding the lock on the object list in the example above
 * through the entire callback not only prevents the object from going away, it
 * also allows you to lock the list elsewhere and know that none of its elements
 * will move during iteration.
 *
 * Adding an explicit hold everywhere an object from the cache is used is tricky
 * and involves much more change to client code than a cache-specific solution
 * that leverages existing state to decide whether or not an object is
 * movable. However, this approach has the advantage that no object remains
 * immovable for any significant length of time, making it extremely unlikely
 * that long-lived allocations can continue holding slabs hostage; and it works
 * for any cache.
 *
 * 3. Consolidator Implementation
 *
 * Once the client supplies a move function that a) recognizes known objects and
 * b) avoids moving objects that are actively in use, the remaining work is up
 * to the consolidator to decide which objects to move and when to issue
 * callbacks.
 *
 * The consolidator relies on the fact that a cache's slabs are ordered by
 * usage. Each slab has a fixed number of objects. Depending on the slab's
 * "color" (the offset of the first object from the beginning of the slab;
 * offsets are staggered to mitigate false sharing of cache lines) it is either
 * the maximum number of objects per slab determined at cache creation time or
 * else the number closest to the maximum that fits within the space remaining
 * after the initial offset. A completely allocated slab may contribute some
 * internal fragmentation (per-slab overhead) but no external fragmentation, so
 * it is of no interest to the consolidator. At the other extreme, slabs whose
 * objects have all been freed to the slab are released to the virtual memory
 * (VM) subsystem (objects freed to magazines are still allocated as far as the
 * slab is concerned). External fragmentation exists when there are slabs
 * somewhere between these extremes. A partial slab has at least one but not all
 * of its objects allocated. The more partial slabs, and the fewer allocated
 * objects on each of them, the higher the fragmentation. Hence the
 * consolidator's overall strategy is to reduce the number of partial slabs by
 * moving allocated objects from the least allocated slabs to the most allocated
 * slabs.
 *
 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
 * slabs are kept separately in an unordered list. Since the majority of slabs
 * tend to be completely allocated (a typical unfragmented cache may have
 * thousands of complete slabs and only a single partial slab), separating
 * complete slabs improves the efficiency of partial slab ordering, since the
 * complete slabs do not affect the depth or balance of the AVL tree. This
 * ordered sequence of partial slabs acts as a "free list" supplying objects for
 * allocation requests.
 *
 * Objects are always allocated from the first partial slab in the free list,
 * where the allocation is most likely to eliminate a partial slab (by
 * completely allocating it). Conversely, when a single object from a completely
 * allocated slab is freed to the slab, that slab is added to the front of the
 * free list. Since most free list activity involves highly allocated slabs
 * coming and going at the front of the list, slabs tend naturally toward the
 * ideal order: highly allocated at the front, sparsely allocated at the back.
 * Slabs with few allocated objects are likely to become completely free if they
 * keep a safe distance away from the front of the free list. Slab misorders
 * interfere with the natural tendency of slabs to become completely free or
 * completely allocated. For example, a slab with a single allocated object
 * needs only a single free to escape the cache; its natural desire is
 * frustrated when it finds itself at the front of the list where a second
 * allocation happens just before the free could have released it. Another slab
 * with all but one object allocated might have supplied the buffer instead, so
 * that both (as opposed to neither) of the slabs would have been taken off the
 * free list.
 *
 * Although slabs tend naturally toward the ideal order, misorders allowed by a
 * simple list implementation defeat the consolidator's strategy of merging
 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
 * needs another way to fix misorders to optimize its callback strategy. One
 * approach is to periodically scan a limited number of slabs, advancing a
 * marker to hold the current scan position, and to move extreme misorders to
 * the front or back of the free list and to the front or back of the current
 * scan range. By making consecutive scan ranges overlap by one slab, the least
 * allocated slab in the current range can be carried along from the end of one
 * scan to the start of the next.
 *
 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
 * task, however. Since most of the cache's activity is in the magazine layer,
 * and allocations from the slab layer represent only a startup cost, the
 * overhead of maintaining a balanced tree is not a significant concern compared
 * to the opportunity of reducing complexity by eliminating the partial slab
 * scanner just described. The overhead of an AVL tree is minimized by
 * maintaining only partial slabs in the tree and keeping completely allocated
 * slabs separately in a list. To avoid increasing the size of the slab
 * structure the AVL linkage pointers are reused for the slab's list linkage,
 * since the slab will always be either partial or complete, never stored both
 * ways at the same time. To further minimize the overhead of the AVL tree the
 * compare function that orders partial slabs by usage divides the range of
 * allocated object counts into bins such that counts within the same bin are
 * considered equal. Binning partial slabs makes it less likely that allocating
 * or freeing a single object will change the slab's order, requiring a tree
 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
 * requiring some rebalancing of the tree). Allocation counts closest to
 * completely free and completely allocated are left unbinned (finely sorted) to
 * better support the consolidator's strategy of merging slabs at either
 * extreme.
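 *
 * A hedged sketch of such a binned comparison (the names and bin boundaries
 * here are hypothetical; the actual compare function lives in this file):
 *
 *      static int
 *      slab_usage_cmp(const void *l, const void *r)
 *      {
 *              size_t lbin = usage_bin(l);     // hypothetical binning helper
 *              size_t rbin = usage_bin(r);
 *
 *              if (lbin > rbin)                // more allocated sorts first
 *                      return (-1);
 *              if (lbin < rbin)
 *                      return (1);
 *              // break ties by address so that AVL entries remain unique
 *              if ((uintptr_t)l < (uintptr_t)r)
 *                      return (-1);
 *              return ((uintptr_t)l > (uintptr_t)r ? 1 : 0);
 *      }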
 *
 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
 *
 * The consolidator piggybacks on the kmem maintenance thread and is called on
 * the same interval as kmem_cache_update(), once per cache every fifteen
 * seconds. kmem maintains a running count of unallocated objects in the slab
 * layer (cache_bufslab). The consolidator checks whether that number exceeds
 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
 * there is a significant number of slabs in the cache (arbitrarily a minimum
 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
 * working set are included in the assessment, and magazines in the depot are
 * reaped if those objects would lift cache_bufslab above the fragmentation
 * threshold. Once the consolidator decides that a cache is fragmented, it looks
 * for a candidate slab to reclaim, starting at the end of the partial slab free
 * list and scanning backwards. At first the consolidator is choosy: only a slab
 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
 * single allocated object, regardless of percentage). If there is difficulty
 * finding a candidate slab, kmem raises the allocation threshold incrementally,
 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
 * even in the worst case of every slab in the cache being almost 7/8 allocated.
 * The threshold can also be lowered incrementally when candidate slabs are easy
 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
 * is no longer fragmented.
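 *
 * As a worked example of the initial test: in a cache with more than 101
 * slabs, cache_buftotal = 100000, and cache_bufslab = 15000, the slab
 * layer's free objects amount to 15% of the total. That exceeds the 12.5%
 * (1/8) threshold, so the cache is considered fragmented and the
 * consolidator begins scanning for candidate slabs.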
 *
 * 3.2 Generating Callbacks
 *
 * Once an eligible slab is chosen, a callback is generated for every allocated
 * object on the slab, in the hope that the client will move everything off the
 * slab and make it reclaimable. Objects selected as move destinations are
 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
 * order (most allocated at the front, least allocated at the back) and a
 * cooperative client, the consolidator will succeed in removing slabs from both
 * ends of the free list, completely allocating on the one hand and completely
 * freeing on the other. Objects selected as move destinations are allocated in
 * the kmem maintenance thread where move requests are enqueued. A separate
 * callback thread removes pending callbacks from the queue and calls the
 * client. The separate thread ensures that client code (the move function) does
 * not interfere with internal kmem maintenance tasks. A map of pending
 * callbacks keyed by object address (the object to be moved) is checked to
 * ensure that duplicate callbacks are not generated for the same object.
 * Allocating the move destination (the object to move to) prevents subsequent
 * callbacks from selecting the same destination as an earlier pending callback.
 *
 * Move requests can also be generated by kmem_cache_reap() when the system is
 * desperate for memory and by kmem_cache_move_notify(), called by the client to
 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
 * The map of pending callbacks is protected by the same lock that protects the
 * slab layer.
 *
 * When the system is desperate for memory, kmem does not bother to determine
 * whether or not the cache exceeds the fragmentation threshold, but tries to
 * consolidate as many slabs as possible. Normally, the consolidator chews
 * slowly, one sparsely allocated slab at a time during each maintenance
 * interval that the cache is fragmented. When desperate, the consolidator
 * starts at the last partial slab and enqueues callbacks for every allocated
 * object on every partial slab, working backwards until it reaches the first
 * partial slab. The first partial slab, meanwhile, advances in pace with the
 * consolidator as allocations to supply move destinations for the enqueued
 * callbacks use up the highly allocated slabs at the front of the free list.
 * Ideally, the overgrown free list collapses like an accordion, starting at
 * both ends and ending at the center with a single partial slab.
 *
 * 3.3 Client Responses
 *
 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
 * marks the slab that supplied the stuck object non-reclaimable and moves it
 * to the front of the free list. The slab remains marked as long as it remains
 * on the free list, and it appears more allocated to the partial slab compare
 * function than any unmarked slab, no matter how many of its objects are
 * allocated.
 * Since even one immovable object ties up the entire slab, the goal is to
 * completely allocate any slab that cannot be completely freed. kmem does not
 * bother generating callbacks to move objects from a marked slab unless the
 * system is desperate.
 *
 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
 * slab. If the client responds LATER too many times, kmem disbelieves and
 * treats the response as a NO. The count is cleared when the slab is taken off
 * the partial slab list or when the client moves one of the slab's objects.
 *
 * 4. Observability
 *
 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
 * the ::kmem_slabs dcmd. For a complete description of the command, enter
 * '::help kmem_slabs' at the mdb prompt.
 */

#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/mutex.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/kobj.h>
#include <sys/disp.h>
#include <vm/seg_kmem.h>
#include <sys/log.h>
#include <sys/callb.h>
#include <sys/taskq.h>
#include <sys/modctl.h>
#include <sys/reboot.h>
#include <sys/id32.h>
#include <sys/zone.h>
#include <sys/netstack.h>
#ifdef  DEBUG
#include <sys/random.h>
#endif

extern void streams_msg_init(void);
extern int segkp_fromheap;
extern void segkp_cache_free(void);
extern int callout_init_done;

struct kmem_cache_kstat {
        kstat_named_t   kmc_buf_size;
        kstat_named_t   kmc_align;
        kstat_named_t   kmc_chunk_size;
        kstat_named_t   kmc_slab_size;
        kstat_named_t   kmc_alloc;
        kstat_named_t   kmc_alloc_fail;
        kstat_named_t   kmc_free;
        kstat_named_t   kmc_depot_alloc;
        kstat_named_t   kmc_depot_free;
        kstat_named_t   kmc_depot_contention;
        kstat_named_t   kmc_slab_alloc;
        kstat_named_t   kmc_slab_free;
        kstat_named_t   kmc_buf_constructed;
        kstat_named_t   kmc_buf_avail;
        kstat_named_t   kmc_buf_inuse;
        kstat_named_t   kmc_buf_total;
        kstat_named_t   kmc_buf_max;
        kstat_named_t   kmc_slab_create;
        kstat_named_t   kmc_slab_destroy;
        kstat_named_t   kmc_vmem_source;
        kstat_named_t   kmc_hash_size;
        kstat_named_t   kmc_hash_lookup_depth;
        kstat_named_t   kmc_hash_rescale;
        kstat_named_t   kmc_full_magazines;
        kstat_named_t   kmc_empty_magazines;
        kstat_named_t   kmc_magazine_size;
        kstat_named_t   kmc_reap; /* number of kmem_cache_reap() calls */
        kstat_named_t   kmc_defrag; /* attempts to defrag all partial slabs */
        kstat_named_t   kmc_scan; /* attempts to defrag one partial slab */
        kstat_named_t   kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
        kstat_named_t   kmc_move_yes;
        kstat_named_t   kmc_move_no;
        kstat_named_t   kmc_move_later;
        kstat_named_t   kmc_move_dont_need;
        kstat_named_t   kmc_move_dont_know; /* obj unrecognized by client ... */
        kstat_named_t   kmc_move_hunt_found; /* ... but found in mag layer */
        kstat_named_t   kmc_move_slabs_freed; /* slabs freed by consolidator */
        kstat_named_t   kmc_move_reclaimable; /* buffers, if consolidator ran */
} kmem_cache_kstat = {
        { "buf_size",           KSTAT_DATA_UINT64 },
        { "align",              KSTAT_DATA_UINT64 },
        { "chunk_size",         KSTAT_DATA_UINT64 },
        { "slab_size",          KSTAT_DATA_UINT64 },
        { "alloc",              KSTAT_DATA_UINT64 },
        { "alloc_fail",         KSTAT_DATA_UINT64 },
        { "free",               KSTAT_DATA_UINT64 },
        { "depot_alloc",        KSTAT_DATA_UINT64 },
        { "depot_free",         KSTAT_DATA_UINT64 },
        { "depot_contention",   KSTAT_DATA_UINT64 },
        { "slab_alloc",         KSTAT_DATA_UINT64 },
        { "slab_free",          KSTAT_DATA_UINT64 },
        { "buf_constructed",    KSTAT_DATA_UINT64 },
        { "buf_avail",          KSTAT_DATA_UINT64 },
        { "buf_inuse",          KSTAT_DATA_UINT64 },
        { "buf_total",          KSTAT_DATA_UINT64 },
        { "buf_max",            KSTAT_DATA_UINT64 },
        { "slab_create",        KSTAT_DATA_UINT64 },
        { "slab_destroy",       KSTAT_DATA_UINT64 },
        { "vmem_source",        KSTAT_DATA_UINT64 },
        { "hash_size",          KSTAT_DATA_UINT64 },
        { "hash_lookup_depth",  KSTAT_DATA_UINT64 },
        { "hash_rescale",       KSTAT_DATA_UINT64 },
        { "full_magazines",     KSTAT_DATA_UINT64 },
        { "empty_magazines",    KSTAT_DATA_UINT64 },
        { "magazine_size",      KSTAT_DATA_UINT64 },
        { "reap",               KSTAT_DATA_UINT64 },
        { "defrag",             KSTAT_DATA_UINT64 },
        { "scan",               KSTAT_DATA_UINT64 },
        { "move_callbacks",     KSTAT_DATA_UINT64 },
        { "move_yes",           KSTAT_DATA_UINT64 },
        { "move_no",            KSTAT_DATA_UINT64 },
        { "move_later",         KSTAT_DATA_UINT64 },
        { "move_dont_need",     KSTAT_DATA_UINT64 },
        { "move_dont_know",     KSTAT_DATA_UINT64 },
        { "move_hunt_found",    KSTAT_DATA_UINT64 },
        { "move_slabs_freed",   KSTAT_DATA_UINT64 },
        { "move_reclaimable",   KSTAT_DATA_UINT64 },
};

static kmutex_t kmem_cache_kstat_lock;

/*
 * The default set of caches to back kmem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next kmem_cache_size greater than or equal to it must be a
 * multiple of 64.
 *
 * We split the table into two sections:  size <= 4k and size > 4k.  This
 * saves a lot of space and cache footprint in our cache tables.
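 *
 * For example, P2ALIGN(8192 / 7, 64) = P2ALIGN(1170, 64) = 1152, the
 * largest multiple of 64 not exceeding 8192 / 7, so the entries computed
 * below remain 64-byte aligned.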
 946  */
 947 static const int kmem_alloc_sizes[] = {
 948         1 * 8,
 949         2 * 8,
 950         3 * 8,
 951         4 * 8,          5 * 8,          6 * 8,          7 * 8,
 952         4 * 16,         5 * 16,         6 * 16,         7 * 16,
 953         4 * 32,         5 * 32,         6 * 32,         7 * 32,
 954         4 * 64,         5 * 64,         6 * 64,         7 * 64,
 955         4 * 128,        5 * 128,        6 * 128,        7 * 128,
 956         P2ALIGN(8192 / 7, 64),
 957         P2ALIGN(8192 / 6, 64),
 958         P2ALIGN(8192 / 5, 64),
 959         P2ALIGN(8192 / 4, 64),
 960         P2ALIGN(8192 / 3, 64),
 961         P2ALIGN(8192 / 2, 64),
 962 };
 963 
 964 static const int kmem_big_alloc_sizes[] = {
 965         2 * 4096,       3 * 4096,
 966         2 * 8192,       3 * 8192,
 967         4 * 8192,       5 * 8192,       6 * 8192,       7 * 8192,
 968         8 * 8192,       9 * 8192,       10 * 8192,      11 * 8192,
 969         12 * 8192,      13 * 8192,      14 * 8192,      15 * 8192,
 970         16 * 8192
 971 };
 972 
 973 #define KMEM_MAXBUF             4096
 974 #define KMEM_BIG_MAXBUF_32BIT   32768
 975 #define KMEM_BIG_MAXBUF         131072
 976 
 977 #define KMEM_BIG_MULTIPLE       4096    /* big_alloc_sizes must be a multiple */
 978 #define KMEM_BIG_SHIFT          12      /* lg(KMEM_BIG_MULTIPLE) */
 979 
 980 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
 981 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
 982 
 983 #define KMEM_ALLOC_TABLE_MAX    (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
 984 static size_t kmem_big_alloc_table_max = 0;     /* # of filled elements */
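/*
 * A sketch of the lookup kmem_alloc() performs against these tables for
 * small requests: a size of up to KMEM_MAXBUF bytes maps directly to a
 * cache slot, e.g.
 *
 *	index = (size - 1) >> KMEM_ALIGN_SHIFT;
 *	cp = kmem_alloc_table[index];
 *
 * so every request rounds up to the smallest cache that can hold it.
 */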
 985 
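/*
 * Each entry gives the magazine size in rounds, the magazine alignment,
 * the buffer size a cache's chunks must exceed for this type to be chosen
 * initially, and the largest chunk size for which the magazine may later
 * be grown; see kmem_cache_create() and kmem_cache_magazine_resize() for
 * the authoritative selection logic.
 */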
 986 static kmem_magtype_t kmem_magtype[] = {
 987         { 1,    8,      3200,   65536   },
 988         { 3,    16,     256,    32768   },
 989         { 7,    32,     64,     16384   },
 990         { 15,   64,     0,      8192    },
 991         { 31,   64,     0,      4096    },
 992         { 47,   64,     0,      2048    },
 993         { 63,   64,     0,      1024    },
 994         { 95,   64,     0,      512     },
 995         { 143,  64,     0,      0       },
 996 };
 997 
 998 static uint32_t kmem_reaping;
 999 static uint32_t kmem_reaping_idspace;
1000 
1001 /*
1002  * kmem tunables
1003  */
1004 clock_t kmem_reap_interval;     /* cache reaping rate [15 * HZ ticks] */
1005 int kmem_depot_contention = 3;  /* max failed tryenters per reap interval */

1006 pgcnt_t kmem_reapahead = 0;     /* start reaping N pages before pageout */
1007 int kmem_panic = 1;             /* whether to panic on error */
1008 int kmem_logging = 1;           /* kmem_log_enter() override */
1009 uint32_t kmem_mtbf = 0;         /* mean time between failures [default: off] */
1010 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
1011 size_t kmem_content_log_size;   /* content log size [2% of memory] */
1012 size_t kmem_failure_log_size;   /* failure log [4 pages per CPU] */
1013 size_t kmem_slab_log_size;      /* slab create log [4 pages per CPU] */
1014 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1015 size_t kmem_lite_minsize = 0;   /* minimum buffer size for KMF_LITE */
1016 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1017 int kmem_lite_pcs = 4;          /* number of PCs to store in KMF_LITE mode */
1018 size_t kmem_maxverify;          /* maximum bytes to inspect in debug routines */
1019 size_t kmem_minfirewall;        /* hardware-enforced redzone threshold */
1020 
1021 #ifdef _LP64
1022 size_t  kmem_max_cached = KMEM_BIG_MAXBUF;      /* maximum kmem_alloc cache */
1023 #else
1024 size_t  kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1025 #endif
1026 
1027 #ifdef DEBUG
1028 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1029 #else
1030 int kmem_flags = 0;
1031 #endif
1032 int kmem_ready;
1033 
1034 static kmem_cache_t     *kmem_slab_cache;
1035 static kmem_cache_t     *kmem_bufctl_cache;
1036 static kmem_cache_t     *kmem_bufctl_audit_cache;
1037 
1038 static kmutex_t         kmem_cache_lock;        /* inter-cache linkage only */
1039 static list_t           kmem_caches;
1040 
1041 static taskq_t          *kmem_taskq;
1042 static kmutex_t         kmem_flags_lock;
1043 static vmem_t           *kmem_metadata_arena;
1044 static vmem_t           *kmem_msb_arena;        /* arena for metadata caches */
1045 static vmem_t           *kmem_cache_arena;
1046 static vmem_t           *kmem_hash_arena;
1047 static vmem_t           *kmem_log_arena;
1048 static vmem_t           *kmem_oversize_arena;
1049 static vmem_t           *kmem_va_arena;
1050 static vmem_t           *kmem_default_arena;
1051 static vmem_t           *kmem_firewall_va_arena;
1052 static vmem_t           *kmem_firewall_arena;
1053 
1054 /*
1055  * kmem slab consolidator thresholds (tunables)
1056  */
1057 size_t kmem_frag_minslabs = 101;        /* minimum total slabs */
1058 size_t kmem_frag_numer = 1;             /* free buffers (numerator) */
1059 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1060 /*
1061  * Maximum number of slabs from which to move buffers during a single
1062  * maintenance interval while the system is not low on memory.
1063  */
1064 size_t kmem_reclaim_max_slabs = 1;
1065 /*
1066  * Number of slabs to scan backwards from the end of the partial slab list
1067  * when searching for buffers to relocate.
1068  */
1069 size_t kmem_reclaim_scan_range = 12;
1070 
1071 /* consolidator knobs */
1072 boolean_t kmem_move_noreap;
1073 boolean_t kmem_move_blocked;
1074 boolean_t kmem_move_fulltilt;
1075 boolean_t kmem_move_any_partial;
1076 
1077 #ifdef  DEBUG
1078 /*
1079  * kmem consolidator debug tunables:
1080  * Ensure code coverage by occasionally running the consolidator even when the
1081  * caches are not fragmented (they may never be). These are mean times
1082  * between events, expressed in cache maintenance intervals (kmem_cache_update).
1083  */
1084 uint32_t kmem_mtb_move = 60;    /* defrag 1 slab (~15min) */
1085 uint32_t kmem_mtb_reap = 1800;  /* defrag all slabs (~7.5hrs) */
1086 #endif  /* DEBUG */
1087 
1088 static kmem_cache_t     *kmem_defrag_cache;
1089 static kmem_cache_t     *kmem_move_cache;
1090 static taskq_t          *kmem_move_taskq;
1091 
1092 static void kmem_cache_scan(kmem_cache_t *);
1093 static void kmem_cache_defrag(kmem_cache_t *);
1094 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1095 
1096 
1097 kmem_log_header_t       *kmem_transaction_log;
1098 kmem_log_header_t       *kmem_content_log;
1099 kmem_log_header_t       *kmem_failure_log;
1100 kmem_log_header_t       *kmem_slab_log;
1101 
1102 static int              kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1103 
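/*
 * Record the calling PC in a KMF_LITE buftag's history: existing entries
 * are shifted down one slot so that bt_history[0] always holds the most
 * recent transaction.
 */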
1104 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller)                       \
1105         if ((count) > 0) {                                           \
1106                 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1107                 pc_t *_e;                                               \
1108                 /* memmove() the old entries down one notch */          \
1109                 for (_e = &_s[(count) - 1]; _e > _s; _e--)               \
1110                         *_e = *(_e - 1);                                \
1111                 *_s = (uintptr_t)(caller);                              \
1112         }
1113 
1114 #define KMERR_MODIFIED  0       /* buffer modified while on freelist */
1115 #define KMERR_REDZONE   1       /* redzone violation (write past end of buf) */
1116 #define KMERR_DUPFREE   2       /* freed a buffer twice */
1117 #define KMERR_BADADDR   3       /* freed a bad (unallocated) address */
1118 #define KMERR_BADBUFTAG 4       /* buftag corrupted */
1119 #define KMERR_BADBUFCTL 5       /* bufctl corrupted */
1120 #define KMERR_BADCACHE  6       /* freed a buffer to the wrong cache */
1121 #define KMERR_BADSIZE   7       /* alloc size != free size */
1122 #define KMERR_BADBASE   8       /* buffer base address wrong */
1123 
1124 struct {
1125         hrtime_t        kmp_timestamp;  /* timestamp of panic */
1126         int             kmp_error;      /* type of kmem error */
1127         void            *kmp_buffer;    /* buffer that induced panic */
1128         void            *kmp_realbuf;   /* real start address for buffer */
1129         kmem_cache_t    *kmp_cache;     /* buffer's cache according to client */
1130         kmem_cache_t    *kmp_realcache; /* actual cache containing buffer */
1131         kmem_slab_t     *kmp_slab;      /* slab according to kmem_findslab() */
1132         kmem_bufctl_t   *kmp_bufctl;    /* bufctl */
1133 } kmem_panic_info;
1134 
1135 
1136 static void
1137 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1138 {
1139         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1140         uint64_t *buf = buf_arg;
1141 
1142         while (buf < bufend)
1143                 *buf++ = pattern;
1144 }
1145 
1146 static void *
1147 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1148 {
1149         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1150         uint64_t *buf;
1151 
1152         for (buf = buf_arg; buf < bufend; buf++)
1153                 if (*buf != pattern)
1154                         return (buf);
1155         return (NULL);
1156 }
1157 
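/*
 * Like verify_pattern(), but installs the new pattern as it scans.  On a
 * mismatch, the prefix already overwritten is restored to the old pattern
 * before the offending word is returned, leaving the buffer as it was
 * found.
 */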
1158 static void *
1159 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1160 {
1161         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1162         uint64_t *buf;
1163 
1164         for (buf = buf_arg; buf < bufend; buf++) {
1165                 if (*buf != old) {
1166                         copy_pattern(old, buf_arg,
1167                             (char *)buf - (char *)buf_arg);
1168                         return (buf);
1169                 }
1170                 *buf = new;
1171         }
1172 
1173         return (NULL);
1174 }
1175 
1176 static void
1177 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1178 {
1179         kmem_cache_t *cp;
1180 
1181         mutex_enter(&kmem_cache_lock);
1182         for (cp = list_head(&kmem_caches); cp != NULL;
1183             cp = list_next(&kmem_caches, cp))
1184                 if (tq != NULL)
1185                         (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1186                             tqflag);
1187                 else
1188                         func(cp);
1189         mutex_exit(&kmem_cache_lock);
1190 }
1191 
1192 static void
1193 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1194 {
1195         kmem_cache_t *cp;
1196 
1197         mutex_enter(&kmem_cache_lock);
1198         for (cp = list_head(&kmem_caches); cp != NULL;
1199             cp = list_next(&kmem_caches, cp)) {
1200                 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1201                         continue;
1202                 if (tq != NULL)
1203                         (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1204                             tqflag);
1205                 else
1206                         func(cp);
1207         }
1208         mutex_exit(&kmem_cache_lock);
1209 }
1210 
1211 /*
1212  * Debugging support.  Given a buffer address, find its slab.
1213  */
1214 static kmem_slab_t *
1215 kmem_findslab(kmem_cache_t *cp, void *buf)
1216 {
1217         kmem_slab_t *sp;
1218 
1219         mutex_enter(&cp->cache_lock);
1220         for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1221             sp = list_next(&cp->cache_complete_slabs, sp)) {
1222                 if (KMEM_SLAB_MEMBER(sp, buf)) {
1223                         mutex_exit(&cp->cache_lock);
1224                         return (sp);
1225                 }
1226         }
1227         for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1228             sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1229                 if (KMEM_SLAB_MEMBER(sp, buf)) {
1230                         mutex_exit(&cp->cache_lock);
1231                         return (sp);
1232                 }
1233         }
1234         mutex_exit(&cp->cache_lock);
1235 
1236         return (NULL);
1237 }
1238 
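/*
 * Diagnose and report an allocator error: identify the buffer's real slab
 * and cache, record the details in kmem_panic_info for post-mortem
 * inspection, then panic (kmem_panic > 0), enter the debugger
 * (kmem_panic == 0), or merely log and continue (kmem_panic < 0).
 */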
1239 static void
1240 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1241 {
1242         kmem_buftag_t *btp = NULL;
1243         kmem_bufctl_t *bcp = NULL;
1244         kmem_cache_t *cp = cparg;
1245         kmem_slab_t *sp;
1246         uint64_t *off;
1247         void *buf = bufarg;
1248 
1249         kmem_logging = 0;       /* stop logging when a bad thing happens */
1250 
1251         kmem_panic_info.kmp_timestamp = gethrtime();
1252 
1253         sp = kmem_findslab(cp, buf);
1254         if (sp == NULL) {
1255                 for (cp = list_tail(&kmem_caches); cp != NULL;
1256                     cp = list_prev(&kmem_caches, cp)) {
1257                         if ((sp = kmem_findslab(cp, buf)) != NULL)
1258                                 break;
1259                 }
1260         }
1261 
1262         if (sp == NULL) {
1263                 cp = NULL;
1264                 error = KMERR_BADADDR;
1265         } else {
1266                 if (cp != cparg)
1267                         error = KMERR_BADCACHE;
1268                 else
1269                         buf = (char *)bufarg - ((uintptr_t)bufarg -
1270                             (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1271                 if (buf != bufarg)
1272                         error = KMERR_BADBASE;
1273                 if (cp->cache_flags & KMF_BUFTAG)
1274                         btp = KMEM_BUFTAG(cp, buf);
1275                 if (cp->cache_flags & KMF_HASH) {
1276                         mutex_enter(&cp->cache_lock);
1277                         for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1278                                 if (bcp->bc_addr == buf)
1279                                         break;
1280                         mutex_exit(&cp->cache_lock);
1281                         if (bcp == NULL && btp != NULL)
1282                                 bcp = btp->bt_bufctl;
1283                         if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1284                             NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1285                             bcp->bc_addr != buf) {
1286                                 error = KMERR_BADBUFCTL;
1287                                 bcp = NULL;
1288                         }
1289                 }
1290         }
1291 
1292         kmem_panic_info.kmp_error = error;
1293         kmem_panic_info.kmp_buffer = bufarg;
1294         kmem_panic_info.kmp_realbuf = buf;
1295         kmem_panic_info.kmp_cache = cparg;
1296         kmem_panic_info.kmp_realcache = cp;
1297         kmem_panic_info.kmp_slab = sp;
1298         kmem_panic_info.kmp_bufctl = bcp;
1299 
1300         printf("kernel memory allocator: ");
1301 
1302         switch (error) {
1303 
1304         case KMERR_MODIFIED:
1305                 printf("buffer modified after being freed\n");
1306                 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1307                 if (off == NULL)        /* shouldn't happen */
1308                         off = buf;
1309                 printf("modification occurred at offset 0x%lx "
1310                     "(0x%llx replaced by 0x%llx)\n",
1311                     (uintptr_t)off - (uintptr_t)buf,
1312                     (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1313                 break;
1314 
1315         case KMERR_REDZONE:
1316                 printf("redzone violation: write past end of buffer\n");
1317                 break;
1318 
1319         case KMERR_BADADDR:
1320                 printf("invalid free: buffer not in cache\n");
1321                 break;
1322 
1323         case KMERR_DUPFREE:
1324                 printf("duplicate free: buffer freed twice\n");
1325                 break;
1326 
1327         case KMERR_BADBUFTAG:
1328                 printf("boundary tag corrupted\n");
1329                 printf("bcp ^ bxstat = %lx, should be %lx\n",
1330                     (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1331                     KMEM_BUFTAG_FREE);
1332                 break;
1333 
1334         case KMERR_BADBUFCTL:
1335                 printf("bufctl corrupted\n");
1336                 break;
1337 
1338         case KMERR_BADCACHE:
1339                 printf("buffer freed to wrong cache\n");
1340                 printf("buffer was allocated from %s,\n", cp->cache_name);
1341                 printf("caller attempting free to %s.\n", cparg->cache_name);
1342                 break;
1343 
1344         case KMERR_BADSIZE:
1345                 printf("bad free: free size (%u) != alloc size (%u)\n",
1346                     KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1347                     KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1348                 break;
1349 
1350         case KMERR_BADBASE:
1351                 printf("bad free: free address (%p) != alloc address (%p)\n",
1352                     bufarg, buf);
1353                 break;
1354         }
1355 
1356         printf("buffer=%p  bufctl=%p  cache: %s\n",
1357             bufarg, (void *)bcp, cparg->cache_name);
1358 
1359         if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1360             error != KMERR_BADBUFCTL) {
1361                 int d;
1362                 timestruc_t ts;
1363                 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1364 
1365                 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1366                 printf("previous transaction on buffer %p:\n", buf);
1367                 printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
1368                     (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1369                     (void *)sp, cp->cache_name);
1370                 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1371                         ulong_t off;
1372                         char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1373                         printf("%s+%lx\n", sym ? sym : "?", off);
1374                 }
1375         }
1376         if (kmem_panic > 0)
1377                 panic("kernel heap corruption detected");
1378         if (kmem_panic == 0)
1379                 debug_enter(NULL);
1380         kmem_logging = 1;       /* resume logging */
1381 }
1382 
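/*
 * Create a circular transaction log carved into 4 * max_ncpus chunks.
 * Each CPU writes records into a private chunk without global
 * synchronization; when its chunk fills, kmem_log_enter() trades it for
 * the oldest chunk on the global free ring, so the log as a whole retains
 * the most recent transactions.
 */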
1383 static kmem_log_header_t *
1384 kmem_log_init(size_t logsize)
1385 {
1386         kmem_log_header_t *lhp;
1387         int nchunks = 4 * max_ncpus;
1388         size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1389         int i;
1390 
1391         /*
1392          * Make sure that lhp->lh_cpu[] is nicely aligned
1393          * to prevent false sharing of cache lines.
1394          */
1395         lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1396         lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1397             NULL, NULL, VM_SLEEP);
1398         bzero(lhp, lhsize);
1399 
1400         mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1401         lhp->lh_nchunks = nchunks;
1402         lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1403         lhp->lh_base = vmem_alloc(kmem_log_arena,
1404             lhp->lh_chunksize * nchunks, VM_SLEEP);
1405         lhp->lh_free = vmem_alloc(kmem_log_arena,
1406             nchunks * sizeof (int), VM_SLEEP);
1407         bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1408 
1409         for (i = 0; i < max_ncpus; i++) {
1410                 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1411                 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1412                 clhp->clh_chunk = i;
1413         }
1414 
1415         for (i = max_ncpus; i < nchunks; i++)
1416                 lhp->lh_free[i] = i;
1417 
1418         lhp->lh_head = max_ncpus;
1419         lhp->lh_tail = 0;
1420 
1421         return (lhp);
1422 }
1423 
1424 static void *
1425 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1426 {
1427         void *logspace;
1428         kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1429 
1430         if (lhp == NULL || kmem_logging == 0 || panicstr)
1431                 return (NULL);
1432 
1433         mutex_enter(&clhp->clh_lock);
1434         clhp->clh_hits++;
1435         if (size > clhp->clh_avail) {
1436                 mutex_enter(&lhp->lh_lock);
1437                 lhp->lh_hits++;
1438                 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1439                 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1440                 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1441                 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1442                 clhp->clh_current = lhp->lh_base +
1443                     clhp->clh_chunk * lhp->lh_chunksize;
1444                 clhp->clh_avail = lhp->lh_chunksize;
1445                 if (size > lhp->lh_chunksize)
1446                         size = lhp->lh_chunksize;
1447                 mutex_exit(&lhp->lh_lock);
1448         }
1449         logspace = clhp->clh_current;
1450         clhp->clh_current += size;
1451         clhp->clh_avail -= size;
1452         bcopy(data, logspace, size);
1453         mutex_exit(&clhp->clh_lock);
1454         return (logspace);
1455 }
1456 
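/*
 * Capture an audit record for bufctl bcp: timestamp, current thread,
 * stack trace, and the record's location in log lp.
 */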
1457 #define KMEM_AUDIT(lp, cp, bcp)                                         \
1458 {                                                                       \
1459         kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp);       \
1460         _bcp->bc_timestamp = gethrtime();                            \
1461         _bcp->bc_thread = curthread;                                 \
1462         _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH);    \
1463         _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp));       \
1464 }
1465 
1466 static void
1467 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1468     kmem_slab_t *sp, void *addr)
1469 {
1470         kmem_bufctl_audit_t bca;
1471 
1472         bzero(&bca, sizeof (kmem_bufctl_audit_t));
1473         bca.bc_addr = addr;
1474         bca.bc_slab = sp;
1475         bca.bc_cache = cp;
1476         KMEM_AUDIT(lp, cp, &bca);
1477 }
1478 
1479 /*
1480  * Create a new slab for cache cp.
1481  */
1482 static kmem_slab_t *
1483 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1484 {
1485         size_t slabsize = cp->cache_slabsize;
1486         size_t chunksize = cp->cache_chunksize;
1487         int cache_flags = cp->cache_flags;
1488         size_t color, chunks;
1489         char *buf, *slab;
1490         kmem_slab_t *sp;
1491         kmem_bufctl_t *bcp;
1492         vmem_t *vmp = cp->cache_arena;
1493 
1494         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1495 
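	/*
	 * Cycle the slab's color (the initial buffer offset) so that
	 * buffers in successive slabs start on different cache lines.
	 */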
1496         color = cp->cache_color + cp->cache_align;
1497         if (color > cp->cache_maxcolor)
1498                 color = cp->cache_mincolor;
1499         cp->cache_color = color;
1500 
1501         slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1502 
1503         if (slab == NULL)
1504                 goto vmem_alloc_failure;
1505 
1506         ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1507 
1508         /*
1509          * Reverify what was already checked in kmem_cache_set_move(), since the
1510          * consolidator depends (for correctness) on slabs being initialized
1511          * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1512          * clients to distinguish uninitialized memory from known objects).
1513          */
1514         ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1515         if (!(cp->cache_cflags & KMC_NOTOUCH))
1516                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1517 
1518         if (cache_flags & KMF_HASH) {
1519                 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1520                         goto slab_alloc_failure;
1521                 chunks = (slabsize - color) / chunksize;
1522         } else {
1523                 sp = KMEM_SLAB(cp, slab);
1524                 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1525         }
1526 
1527         sp->slab_cache       = cp;
1528         sp->slab_head        = NULL;
1529         sp->slab_refcnt      = 0;
1530         sp->slab_base        = buf = slab + color;
1531         sp->slab_chunks      = chunks;
1532         sp->slab_stuck_offset = (uint32_t)-1;
1533         sp->slab_later_count = 0;
1534         sp->slab_flags = 0;
1535 
1536         ASSERT(chunks > 0);
1537         while (chunks-- != 0) {
1538                 if (cache_flags & KMF_HASH) {
1539                         bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1540                         if (bcp == NULL)
1541                                 goto bufctl_alloc_failure;
1542                         if (cache_flags & KMF_AUDIT) {
1543                                 kmem_bufctl_audit_t *bcap =
1544                                     (kmem_bufctl_audit_t *)bcp;
1545                                 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1546                                 bcap->bc_cache = cp;
1547                         }
1548                         bcp->bc_addr = buf;
1549                         bcp->bc_slab = sp;
1550                 } else {
1551                         bcp = KMEM_BUFCTL(cp, buf);
1552                 }
1553                 if (cache_flags & KMF_BUFTAG) {
1554                         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1555                         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1556                         btp->bt_bufctl = bcp;
1557                         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1558                         if (cache_flags & KMF_DEADBEEF) {
1559                                 copy_pattern(KMEM_FREE_PATTERN, buf,
1560                                     cp->cache_verify);
1561                         }
1562                 }
1563                 bcp->bc_next = sp->slab_head;
1564                 sp->slab_head = bcp;
1565                 buf += chunksize;
1566         }
1567 
1568         kmem_log_event(kmem_slab_log, cp, sp, slab);
1569 
1570         return (sp);
1571 
1572 bufctl_alloc_failure:
1573 
1574         while ((bcp = sp->slab_head) != NULL) {
1575                 sp->slab_head = bcp->bc_next;
1576                 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1577         }
1578         kmem_cache_free(kmem_slab_cache, sp);
1579 
1580 slab_alloc_failure:
1581 
1582         vmem_free(vmp, slab, slabsize);
1583 
1584 vmem_alloc_failure:
1585 
1586         kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1587         atomic_inc_64(&cp->cache_alloc_fail);
1588 
1589         return (NULL);
1590 }
1591 
1592 /*
1593  * Destroy a slab.
1594  */
1595 static void
1596 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1597 {
1598         vmem_t *vmp = cp->cache_arena;
1599         void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1600 
1601         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1602         ASSERT(sp->slab_refcnt == 0);
1603 
1604         if (cp->cache_flags & KMF_HASH) {
1605                 kmem_bufctl_t *bcp;
1606                 while ((bcp = sp->slab_head) != NULL) {
1607                         sp->slab_head = bcp->bc_next;
1608                         kmem_cache_free(cp->cache_bufctl_cache, bcp);
1609                 }
1610                 kmem_cache_free(kmem_slab_cache, sp);
1611         }
1612         vmem_free(vmp, slab, cp->cache_slabsize);
1613 }
1614 
1615 static void *
1616 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1617 {
1618         kmem_bufctl_t *bcp, **hash_bucket;
1619         void *buf;
1620         boolean_t new_slab = (sp->slab_refcnt == 0);
1621 
1622         ASSERT(MUTEX_HELD(&cp->cache_lock));
1623         /*
1624          * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1625          * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1626          * slab is newly created.
1627          */
1628         ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1629             (sp == avl_first(&cp->cache_partial_slabs))));
1630         ASSERT(sp->slab_cache == cp);
1631 
1632         cp->cache_slab_alloc++;
1633         cp->cache_bufslab--;
1634         sp->slab_refcnt++;
1635 
1636         bcp = sp->slab_head;
1637         sp->slab_head = bcp->bc_next;
1638 
1639         if (cp->cache_flags & KMF_HASH) {
1640                 /*
1641                  * Add buffer to allocated-address hash table.
1642                  */
1643                 buf = bcp->bc_addr;
1644                 hash_bucket = KMEM_HASH(cp, buf);
1645                 bcp->bc_next = *hash_bucket;
1646                 *hash_bucket = bcp;
1647                 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1648                         KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1649                 }
1650         } else {
1651                 buf = KMEM_BUF(cp, bcp);
1652         }
1653 
1654         ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1655 
1656         if (sp->slab_head == NULL) {
1657                 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1658                 if (new_slab) {
1659                         ASSERT(sp->slab_chunks == 1);
1660                 } else {
1661                         ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1662                         avl_remove(&cp->cache_partial_slabs, sp);
1663                         sp->slab_later_count = 0; /* clear history */
1664                         sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1665                         sp->slab_stuck_offset = (uint32_t)-1;
1666                 }
1667                 list_insert_head(&cp->cache_complete_slabs, sp);
1668                 cp->cache_complete_slab_count++;
1669                 return (buf);
1670         }
1671 
1672         ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1673         /*
1674          * Peek to see if the magazine layer is enabled before
1675          * we prefill.  We're not holding the cpu cache lock,
1676          * so the peek could be wrong, but there's no harm in it.
1677          */
1678         if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1679             (KMEM_CPU_CACHE(cp)->cc_magsize != 0))  {
1680                 kmem_slab_prefill(cp, sp);
1681                 return (buf);
1682         }
1683 
1684         if (new_slab) {
1685                 avl_add(&cp->cache_partial_slabs, sp);
1686                 return (buf);
1687         }
1688 
1689         /*
1690          * The slab is now more allocated than it was, so the
1691          * order remains unchanged.
1692          */
1693         ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1694         return (buf);
1695 }
1696 
1697 /*
1698  * Allocate a raw (unconstructed) buffer from cp's slab layer.
1699  */
1700 static void *
1701 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1702 {
1703         kmem_slab_t *sp;
1704         void *buf;
1705         boolean_t test_destructor;
1706 
1707         mutex_enter(&cp->cache_lock);
1708         test_destructor = (cp->cache_slab_alloc == 0);
1709         sp = avl_first(&cp->cache_partial_slabs);
1710         if (sp == NULL) {
1711                 ASSERT(cp->cache_bufslab == 0);
1712 
1713                 /*
1714                  * The freelist is empty.  Create a new slab.
1715                  */
1716                 mutex_exit(&cp->cache_lock);
1717                 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1718                         return (NULL);
1719                 }
1720                 mutex_enter(&cp->cache_lock);
1721                 cp->cache_slab_create++;
1722                 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1723                         cp->cache_bufmax = cp->cache_buftotal;
1724                 cp->cache_bufslab += sp->slab_chunks;
1725         }
1726 
1727         buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1728         ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1729             (cp->cache_complete_slab_count +
1730             avl_numnodes(&cp->cache_partial_slabs) +
1731             (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1732         mutex_exit(&cp->cache_lock);
1733 
1734         if (test_destructor && cp->cache_destructor != NULL) {
1735                 /*
1736                  * On the first kmem_slab_alloc(), assert that it is valid to
1737                  * call the destructor on a newly constructed object without any
1738                  * client involvement.
1739                  */
1740                 if ((cp->cache_constructor == NULL) ||
1741                     cp->cache_constructor(buf, cp->cache_private,
1742                     kmflag) == 0) {
1743                         cp->cache_destructor(buf, cp->cache_private);
1744                 }
1745                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1746                     cp->cache_bufsize);
1747                 if (cp->cache_flags & KMF_DEADBEEF) {
1748                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1749                 }
1750         }
1751 
1752         return (buf);
1753 }
1754 
1755 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1756 
1757 /*
1758  * Free a raw (unconstructed) buffer to cp's slab layer.
1759  */
1760 static void
1761 kmem_slab_free(kmem_cache_t *cp, void *buf)
1762 {
1763         kmem_slab_t *sp;
1764         kmem_bufctl_t *bcp, **prev_bcpp;
1765 
1766         ASSERT(buf != NULL);
1767 
1768         mutex_enter(&cp->cache_lock);
1769         cp->cache_slab_free++;
1770 
1771         if (cp->cache_flags & KMF_HASH) {
1772                 /*
1773                  * Look up buffer in allocated-address hash table.
1774                  */
1775                 prev_bcpp = KMEM_HASH(cp, buf);
1776                 while ((bcp = *prev_bcpp) != NULL) {
1777                         if (bcp->bc_addr == buf) {
1778                                 *prev_bcpp = bcp->bc_next;
1779                                 sp = bcp->bc_slab;
1780                                 break;
1781                         }
1782                         cp->cache_lookup_depth++;
1783                         prev_bcpp = &bcp->bc_next;
1784                 }
1785         } else {
1786                 bcp = KMEM_BUFCTL(cp, buf);
1787                 sp = KMEM_SLAB(cp, buf);
1788         }
1789 
1790         if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1791                 mutex_exit(&cp->cache_lock);
1792                 kmem_error(KMERR_BADADDR, cp, buf);
1793                 return;
1794         }
1795 
1796         if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1797                 /*
1798                  * If this is the buffer that prevented the consolidator from
1799                  * clearing the slab, we can reset the slab flags now that the
1800                  * buffer is freed. (It makes sense to do this in
1801                  * kmem_cache_free(), where the client gives up ownership of the
1802                  * buffer, but on the hot path the test is too expensive.)
1803                  */
1804                 kmem_slab_move_yes(cp, sp, buf);
1805         }
1806 
1807         if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1808                 if (cp->cache_flags & KMF_CONTENTS)
1809                         ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1810                             kmem_log_enter(kmem_content_log, buf,
1811                             cp->cache_contents);
1812                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1813         }
1814 
1815         bcp->bc_next = sp->slab_head;
1816         sp->slab_head = bcp;
1817 
1818         cp->cache_bufslab++;
1819         ASSERT(sp->slab_refcnt >= 1);
1820 
1821         if (--sp->slab_refcnt == 0) {
1822                 /*
1823                  * There are no outstanding allocations from this slab,
1824                  * so we can reclaim the memory.
1825                  */
1826                 if (sp->slab_chunks == 1) {
1827                         list_remove(&cp->cache_complete_slabs, sp);
1828                         cp->cache_complete_slab_count--;
1829                 } else {
1830                         avl_remove(&cp->cache_partial_slabs, sp);
1831                 }
1832 
1833                 cp->cache_buftotal -= sp->slab_chunks;
1834                 cp->cache_bufslab -= sp->slab_chunks;
1835                 /*
1836                  * Defer releasing the slab to the virtual memory subsystem
1837                  * while there is a pending move callback, since we guarantee
1838                  * that buffers passed to the move callback have only been
1839                  * touched by kmem or by the client itself. Since the memory
1840                  * patterns baddcafe (uninitialized) and deadbeef (freed) both
1841                  * set at least one of the two lowest order bits, the client can
1842                  * test those bits in the move callback to determine whether or
1843                  * not it knows about the buffer (assuming that the client also
1844                  * sets one of those low order bits whenever it frees a buffer).
1845                  */
1846                 if (cp->cache_defrag == NULL ||
1847                     (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1848                     !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1849                         cp->cache_slab_destroy++;
1850                         mutex_exit(&cp->cache_lock);
1851                         kmem_slab_destroy(cp, sp);
1852                 } else {
1853                         list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1854                         /*
1855                          * Slabs are inserted at both ends of the deadlist to
1856                          * distinguish between slabs freed while move callbacks
1857                          * are pending (list head) and a slab freed while the
1858                          * lock is dropped in kmem_move_buffers() (list tail) so
1859                          * that in both cases slab_destroy() is called from the
1860                          * right context.
1861                          */
1862                         if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1863                                 list_insert_tail(deadlist, sp);
1864                         } else {
1865                                 list_insert_head(deadlist, sp);
1866                         }
1867                         cp->cache_defrag->kmd_deadcount++;
1868                         mutex_exit(&cp->cache_lock);
1869                 }
1870                 return;
1871         }
1872 
1873         if (bcp->bc_next == NULL) {
1874                 /* Transition the slab from completely allocated to partial. */
1875                 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1876                 ASSERT(sp->slab_chunks > 1);
1877                 list_remove(&cp->cache_complete_slabs, sp);
1878                 cp->cache_complete_slab_count--;
1879                 avl_add(&cp->cache_partial_slabs, sp);
1880         } else {
1881                 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1882         }
1883 
1884         ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1885             (cp->cache_complete_slab_count +
1886             avl_numnodes(&cp->cache_partial_slabs) +
1887             (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1888         mutex_exit(&cp->cache_lock);
1889 }
1890 
1891 /*
1892  * Return -1 if a kmem error was reported, 1 if the constructor fails, 0 on success.
1893  */
1894 static int
1895 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1896     caddr_t caller)
1897 {
1898         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1899         kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1900         uint32_t mtbf;
1901 
1902         if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1903                 kmem_error(KMERR_BADBUFTAG, cp, buf);
1904                 return (-1);
1905         }
1906 
1907         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1908 
1909         if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1910                 kmem_error(KMERR_BADBUFCTL, cp, buf);
1911                 return (-1);
1912         }
1913 
1914         if (cp->cache_flags & KMF_DEADBEEF) {
1915                 if (!construct && (cp->cache_flags & KMF_LITE)) {
1916                         if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1917                                 kmem_error(KMERR_MODIFIED, cp, buf);
1918                                 return (-1);
1919                         }
1920                         if (cp->cache_constructor != NULL)
1921                                 *(uint64_t *)buf = btp->bt_redzone;
1922                         else
1923                                 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1924                 } else {
1925                         construct = 1;
1926                         if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1927                             KMEM_UNINITIALIZED_PATTERN, buf,
1928                             cp->cache_verify)) {
1929                                 kmem_error(KMERR_MODIFIED, cp, buf);
1930                                 return (-1);
1931                         }
1932                 }
1933         }
1934         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1935 
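	/*
	 * Fault injection: when a mean-time-between-failure interval is set
	 * (globally via kmem_mtbf or per-cache), fail roughly one in every
	 * mtbf KM_NOSLEEP allocations.
	 */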
1936         if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1937             gethrtime() % mtbf == 0 &&
1938             (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1939                 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1940                 if (!construct && cp->cache_destructor != NULL)
1941                         cp->cache_destructor(buf, cp->cache_private);
1942         } else {
1943                 mtbf = 0;
1944         }
1945 
1946         if (mtbf || (construct && cp->cache_constructor != NULL &&
1947             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1948                 atomic_inc_64(&cp->cache_alloc_fail);
1949                 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1950                 if (cp->cache_flags & KMF_DEADBEEF)
1951                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1952                 kmem_slab_free(cp, buf);
1953                 return (1);
1954         }
1955 
1956         if (cp->cache_flags & KMF_AUDIT) {
1957                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1958         }
1959 
1960         if ((cp->cache_flags & KMF_LITE) &&
1961             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1962                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1963         }
1964 
1965         return (0);
1966 }
1967 
1968 static int
1969 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1970 {
1971         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1972         kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1973         kmem_slab_t *sp;
1974 
1975         if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
1976                 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1977                         kmem_error(KMERR_DUPFREE, cp, buf);
1978                         return (-1);
1979                 }
1980                 sp = kmem_findslab(cp, buf);
1981                 if (sp == NULL || sp->slab_cache != cp)
1982                         kmem_error(KMERR_BADADDR, cp, buf);
1983                 else
1984                         kmem_error(KMERR_REDZONE, cp, buf);
1985                 return (-1);
1986         }
1987 
1988         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1989 
1990         if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1991                 kmem_error(KMERR_BADBUFCTL, cp, buf);
1992                 return (-1);
1993         }
1994 
1995         if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
1996                 kmem_error(KMERR_REDZONE, cp, buf);
1997                 return (-1);
1998         }
1999 
2000         if (cp->cache_flags & KMF_AUDIT) {
2001                 if (cp->cache_flags & KMF_CONTENTS)
2002                         bcp->bc_contents = kmem_log_enter(kmem_content_log,
2003                             buf, cp->cache_contents);
2004                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2005         }
2006 
2007         if ((cp->cache_flags & KMF_LITE) &&
2008             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2009                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2010         }
2011 
2012         if (cp->cache_flags & KMF_DEADBEEF) {
2013                 if (cp->cache_flags & KMF_LITE)
2014                         btp->bt_redzone = *(uint64_t *)buf;
2015                 else if (cp->cache_destructor != NULL)
2016                         cp->cache_destructor(buf, cp->cache_private);
2017 
2018                 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2019         }
2020 
2021         return (0);
2022 }
2023 
2024 /*
2025  * Free each object in magazine mp to cp's slab layer, and free mp itself.
2026  */
2027 static void
2028 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2029 {
2030         int round;
2031 
2032         ASSERT(!list_link_active(&cp->cache_link) ||
2033             taskq_member(kmem_taskq, curthread));
2034 
2035         for (round = 0; round < nrounds; round++) {
2036                 void *buf = mp->mag_round[round];
2037 
2038                 if (cp->cache_flags & KMF_DEADBEEF) {
2039                         if (verify_pattern(KMEM_FREE_PATTERN, buf,
2040                             cp->cache_verify) != NULL) {
2041                                 kmem_error(KMERR_MODIFIED, cp, buf);
2042                                 continue;
2043                         }
2044                         if ((cp->cache_flags & KMF_LITE) &&
2045                             cp->cache_destructor != NULL) {
2046                                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2047                                 *(uint64_t *)buf = btp->bt_redzone;
2048                                 cp->cache_destructor(buf, cp->cache_private);
2049                                 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2050                         }
2051                 } else if (cp->cache_destructor != NULL) {
2052                         cp->cache_destructor(buf, cp->cache_private);
2053                 }
2054 
2055                 kmem_slab_free(cp, buf);
2056         }
2057         ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2058         kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2059 }
2060 
2061 /*
2062  * Allocate a magazine from the depot.
2063  */
2064 static kmem_magazine_t *
2065 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2066 {
2067         kmem_magazine_t *mp;
2068 
2069         /*
2070          * If we can't get the depot lock without contention,
2071          * update our contention count.  We use the depot
2072          * contention rate to determine whether we need to
2073          * increase the magazine size for better scalability.
2074          */
2075         if (!mutex_tryenter(&cp->cache_depot_lock)) {
2076                 mutex_enter(&cp->cache_depot_lock);
2077                 cp->cache_depot_contention++;
2078         }
2079 
2080         if ((mp = mlp->ml_list) != NULL) {
2081                 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2082                 mlp->ml_list = mp->mag_next;
2083                 if (--mlp->ml_total < mlp->ml_min)
2084                         mlp->ml_min = mlp->ml_total;
2085                 mlp->ml_alloc++;
2086         }
2087 
2088         mutex_exit(&cp->cache_depot_lock);
2089 
2090         return (mp);
2091 }
2092 
2093 /*
2094  * Free a magazine to the depot.
2095  */
2096 static void
2097 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2098 {
2099         mutex_enter(&cp->cache_depot_lock);
2100         ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2101         mp->mag_next = mlp->ml_list;
2102         mlp->ml_list = mp;
2103         mlp->ml_total++;
2104         mutex_exit(&cp->cache_depot_lock);
2105 }
2106 
2107 /*
2108  * Update the working set statistics for cp's depot: the minimum list
 * length seen over the previous interval becomes the new reap limit,
 * since any magazines beyond that minimum went unused for the entire
 * interval and may safely be reaped.
2109  */
2110 static void
2111 kmem_depot_ws_update(kmem_cache_t *cp)
2112 {
2113         mutex_enter(&cp->cache_depot_lock);
2114         cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2115         cp->cache_full.ml_min = cp->cache_full.ml_total;
2116         cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2117         cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2118         mutex_exit(&cp->cache_depot_lock);
2119 }
2120 
2121 /*
2122  * Set the working set statistics for cp's depot to zero.  (Everything is
2123  * eligible for reaping.)
2124  */
2125 static void
2126 kmem_depot_ws_zero(kmem_cache_t *cp)
2127 {
2128         mutex_enter(&cp->cache_depot_lock);
2129         cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2130         cp->cache_full.ml_min = cp->cache_full.ml_total;
2131         cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2132         cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2133         mutex_exit(&cp->cache_depot_lock);
2134 }
2135 
2136 /*
2137  * The number of bytes to reap before we call kpreempt(). The default (1MB)
2138  * causes us to preempt reaping up to hundreds of times per second. Using a
2139  * larger value (1GB) causes this to have virtually no effect.
2140  */
2141 size_t kmem_reap_preempt_bytes = 1024 * 1024;
2142 
2143 /*
2144  * Reap all magazines that have fallen out of the depot's working set.
2145  */
2146 static void
2147 kmem_depot_ws_reap(kmem_cache_t *cp)
2148 {
2149         size_t bytes = 0;
2150         long reap;
2151         kmem_magazine_t *mp;
2152 
2153         ASSERT(!list_link_active(&cp->cache_link) ||
2154             taskq_member(kmem_taskq, curthread));
2155 
2156         reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2157         while (reap-- &&
2158             (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2159                 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2160                 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2161                 if (bytes > kmem_reap_preempt_bytes) {
2162                         kpreempt(KPREEMPT_SYNC);
2163                         bytes = 0;
2164                 }
2165         }
2166 
2167         reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2168         while (reap-- &&
2169             (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2170                 kmem_magazine_destroy(cp, mp, 0);
2171                 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2172                 if (bytes > kmem_reap_preempt_bytes) {
2173                         kpreempt(KPREEMPT_SYNC);
2174                         bytes = 0;
2175                 }
2176         }
2177 }
2178 
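/*
 * Install magazine mp (holding the given number of rounds) as the CPU's
 * loaded magazine, demoting the previously loaded magazine to the
 * secondary slot so that a subsequent miss in the other direction can
 * simply swap the two.
 */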
2179 static void
2180 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2181 {
2182         ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2183             (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2184         ASSERT(ccp->cc_magsize > 0);
2185 
2186         ccp->cc_ploaded = ccp->cc_loaded;
2187         ccp->cc_prounds = ccp->cc_rounds;
2188         ccp->cc_loaded = mp;
2189         ccp->cc_rounds = rounds;
2190 }
2191 
2192 /*
2193  * Intercept kmem alloc/free calls during crash dump in order to avoid
2194  * changing kmem state while memory is being saved to the dump device.
2195  * Otherwise, ::kmem_verify will report "corrupt buffers".  Note that
2196  * there are no locks because only one CPU calls kmem during a crash
2197  * dump. To enable this feature, first create the associated vmem
2198  * arena with VMC_DUMPSAFE.
2199  */
2200 static void *kmem_dump_start;   /* start of pre-reserved heap */
2201 static void *kmem_dump_end;     /* end of heap area */
2202 static void *kmem_dump_curr;    /* current free heap pointer */
2203 static size_t kmem_dump_size;   /* size of heap area */
2204 
2205 /* appended to each buf created in the pre-reserved heap */
2206 typedef struct kmem_dumpctl {
2207         void    *kdc_next;      /* cache dump free list linkage */
2208 } kmem_dumpctl_t;
2209 
2210 #define KMEM_DUMPCTL(cp, buf)   \
2211         ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2212             sizeof (void *)))
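/*
 * Resulting dump-heap buffer layout (a sketch):
 *
 *	+------------------------------+----------------+
 *	| buf (cache_bufsize bytes)    | kmem_dumpctl_t |
 *	+------------------------------+----------------+
 *	                               ^ P2ROUNDUP(buf + bufsize, sizeof (void *))
 */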
2213 
2214 /* Keep some simple stats. */
2215 #define KMEM_DUMP_LOGS  (100)
2216 
2217 typedef struct kmem_dump_log {
2218         kmem_cache_t    *kdl_cache;
2219         uint_t          kdl_allocs;             /* # of dump allocations */
2220         uint_t          kdl_frees;              /* # of dump frees */
2221         uint_t          kdl_alloc_fails;        /* # of allocation failures */
2222         uint_t          kdl_free_nondump;       /* # of non-dump frees */
2223         uint_t          kdl_unsafe;             /* cache was used, but unsafe */
2224 } kmem_dump_log_t;
2225 
2226 static kmem_dump_log_t *kmem_dump_log;
2227 static int kmem_dump_log_idx;
2228 
2229 #define KDI_LOG(cp, stat) {                                             \
2230         kmem_dump_log_t *kdl;                                           \
2231         if ((kdl = (kmem_dump_log_t *)((cp)->cache_dumplog)) != NULL) {      \
2232                 kdl->stat++;                                         \
2233         } else if (kmem_dump_log_idx < KMEM_DUMP_LOGS) {             \
2234                 kdl = &kmem_dump_log[kmem_dump_log_idx++];          \
2235                 kdl->stat++;                                         \
2236                 kdl->kdl_cache = (cp);                                       \
2237                 (cp)->cache_dumplog = kdl;                           \
2238         }                                                               \
2239 }
2240 
2241 /* set nonzero for a full report */
2242 uint_t kmem_dump_verbose = 0;
2243 
2244 /* stats for oversize heap */
2245 uint_t kmem_dump_oversize_allocs = 0;
2246 uint_t kmem_dump_oversize_max = 0;
2247 
2248 static void
2249 kmem_dumppr(char **pp, char *e, const char *format, ...)
2250 {
2251         char *p = *pp;
2252 
2253         if (p < e) {
2254                 int n;
2255                 va_list ap;
2256 
2257                 va_start(ap, format);
2258                 n = vsnprintf(p, e - p, format, ap);
2259                 va_end(ap);
2260                 *pp = p + n;
2261         }
2262 }
2263 
2264 /*
2265  * Called when dumpadm(1M) configures dump parameters.
2266  */
2267 void
2268 kmem_dump_init(size_t size)
2269 {
2270         if (kmem_dump_start != NULL)
2271                 kmem_free(kmem_dump_start, kmem_dump_size);
2272 
2273         if (kmem_dump_log == NULL)
2274                 kmem_dump_log = (kmem_dump_log_t *)kmem_zalloc(KMEM_DUMP_LOGS *
2275                     sizeof (kmem_dump_log_t), KM_SLEEP);
2276 
2277         kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2278 
2279         if (kmem_dump_start != NULL) {
2280                 kmem_dump_size = size;
2281                 kmem_dump_curr = kmem_dump_start;
2282                 kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2283                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2284         } else {
2285                 kmem_dump_size = 0;
2286                 kmem_dump_curr = NULL;
2287                 kmem_dump_end = NULL;
2288         }
2289 }
2290 
2291 /*
2292  * Set a flag on each kmem_cache_t indicating whether it is safe to use
2293  * alternate dump memory.  Called just before the panic crash dump starts.
2294  * The per-CPU cache flags are set for the calling CPU only.
2295  */
2296 void
2297 kmem_dump_begin(void)
2298 {
2299         ASSERT(panicstr != NULL);
2300         if (kmem_dump_start != NULL) {
2301                 kmem_cache_t *cp;
2302 
2303                 for (cp = list_head(&kmem_caches); cp != NULL;
2304                     cp = list_next(&kmem_caches, cp)) {
2305                         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2306 
2307                         if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2308                                 cp->cache_flags |= KMF_DUMPDIVERT;
2309                                 ccp->cc_flags |= KMF_DUMPDIVERT;
2310                                 ccp->cc_dump_rounds = ccp->cc_rounds;
2311                                 ccp->cc_dump_prounds = ccp->cc_prounds;
2312                                 ccp->cc_rounds = ccp->cc_prounds = -1;
2313                         } else {
2314                                 cp->cache_flags |= KMF_DUMPUNSAFE;
2315                                 ccp->cc_flags |= KMF_DUMPUNSAFE;
2316                         }
2317                 }
2318         }
2319 }
2320 
2321 /*
2322  * Finish the dump intercept: print any warnings on the console and
2323  * return verbose information to dumpsys() in the given buffer.
2325  */
2326 size_t
2327 kmem_dump_finish(char *buf, size_t size)
2328 {
2329         int kdi_idx;
2330         int kdi_end = kmem_dump_log_idx;
2331         int percent = 0;
2332         int header = 0;
2333         int warn = 0;
2334         size_t used;
2335         kmem_cache_t *cp;
2336         kmem_dump_log_t *kdl;
2337         char *e = buf + size;
2338         char *p = buf;
2339 
2340         if (kmem_dump_size == 0 || kmem_dump_verbose == 0)
2341                 return (0);
2342 
2343         used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2344         percent = (used * 100) / kmem_dump_size;
2345 
2346         kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2347         kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2348         kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2349         kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2350             kmem_dump_oversize_allocs);
2351         kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2352             kmem_dump_oversize_max);
2353 
2354         for (kdi_idx = 0; kdi_idx < kdi_end; kdi_idx++) {
2355                 kdl = &kmem_dump_log[kdi_idx];
2356                 cp = kdl->kdl_cache;
2357                 if (cp == NULL)
2358                         break;
2359                 if (kdl->kdl_alloc_fails)
2360                         ++warn;
2361                 if (header == 0) {
2362                         kmem_dumppr(&p, e,
2363                             "Cache Name,Allocs,Frees,Alloc Fails,"
2364                             "Nondump Frees,Unsafe Allocs/Frees\n");
2365                         header = 1;
2366                 }
2367                 kmem_dumppr(&p, e, "%s,%d,%d,%d,%d,%d\n",
2368                     cp->cache_name, kdl->kdl_allocs, kdl->kdl_frees,
2369                     kdl->kdl_alloc_fails, kdl->kdl_free_nondump,
2370                     kdl->kdl_unsafe);
2371         }
2372 
2373         /* return buffer size used */
2374         if (p < e)
2375                 bzero(p, e - p);
2376         return (p - buf);
2377 }
2378 
2379 /*
2380  * Allocate a constructed object from alternate dump memory.
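 *
 * Objects are carved sequentially out of the reserved dump area; each
 * object is followed by a small kmem_dumpctl_t that links it onto the
 * cache's dump freelist when it is freed.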
2381  */
2382 void *
2383 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2384 {
2385         void *buf;
2386         void *curr;
2387         char *bufend;
2388 
2389         /* return a constructed object */
2390         if ((buf = cp->cache_dumpfreelist) != NULL) {
2391                 cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2392                 KDI_LOG(cp, kdl_allocs);
2393                 return (buf);
2394         }
2395 
2396         /* create a new constructed object */
2397         curr = kmem_dump_curr;
2398         buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2399         bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2400 
2401         /* hat layer objects cannot cross a page boundary */
2402         if (cp->cache_align < PAGESIZE) {
2403                 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2404                 if (bufend > page) {
2405                         bufend += page - (char *)buf;
2406                         buf = (void *)page;
2407                 }
2408         }
2409 
2410         /* fall back to normal alloc if reserved area is used up */
2411         if (bufend > (char *)kmem_dump_end) {
2412                 kmem_dump_curr = kmem_dump_end;
2413                 KDI_LOG(cp, kdl_alloc_fails);
2414                 return (NULL);
2415         }
2416 
2417         /*
2418          * Must advance curr pointer before calling a constructor that
2419          * may also allocate memory.
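         *
         * If the constructor does allocate, kmem_dump_curr moves past
         * bufend, which is why the failure path below rewinds the
         * pointer only when it still equals bufend.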
2420          */
2421         kmem_dump_curr = bufend;
2422 
2423         /* run constructor */
2424         if (cp->cache_constructor != NULL &&
2425             cp->cache_constructor(buf, cp->cache_private, kmflag)
2426             != 0) {
2427 #ifdef DEBUG
2428                 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2429                     cp->cache_name, (void *)cp);
2430 #endif
2431                 /* reset curr pointer iff no allocs were done */
2432                 if (kmem_dump_curr == bufend)
2433                         kmem_dump_curr = curr;
2434 
2435                 /* fall back to normal alloc if the constructor fails */
2436                 KDI_LOG(cp, kdl_alloc_fails);
2437                 return (NULL);
2438         }
2439 
2440         KDI_LOG(cp, kdl_allocs);
2441         return (buf);
2442 }
2443 
2444 /*
2445  * Free a constructed object in alternate dump memory.
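 *
 * Returns 0 if the object was handled here (saved on the cache's dump
 * freelist or intentionally dropped) and 1 if the caller should fall
 * back to the normal free path.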
2446  */
2447 int
2448 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2449 {
2450         /* save constructed buffers for next time */
2451         if ((char *)buf >= (char *)kmem_dump_start &&
2452             (char *)buf < (char *)kmem_dump_end) {
2453                 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
2454                 cp->cache_dumpfreelist = buf;
2455                 KDI_LOG(cp, kdl_frees);
2456                 return (0);
2457         }
2458 
2459         /* count all non-dump buf frees */
2460         KDI_LOG(cp, kdl_free_nondump);
2461 
2462         /* just drop buffers that were allocated before dump started */
2463         if (kmem_dump_curr < kmem_dump_end)
2464                 return (0);
2465 
2466         /* fall back to normal free if reserved area is used up */
2467         return (1);
2468 }
2469 
2470 /*
2471  * Allocate a constructed object from cache cp.
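 *
 * For illustration, a client typically pairs this with kmem_cache_free()
 * roughly as follows (foo_cache and foo_t are hypothetical):
 *
 *	foo_t *fp = kmem_cache_alloc(foo_cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(foo_cache, fp);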
2472  */
2473 void *
2474 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2475 {
2476         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2477         kmem_magazine_t *fmp;
2478         void *buf;
2479 
2480         mutex_enter(&ccp->cc_lock);
2481         for (;;) {
2482                 /*
2483                  * If there's an object available in the current CPU's
2484                  * loaded magazine, just take it and return.
2485                  */
2486                 if (ccp->cc_rounds > 0) {
2487                         buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2488                         ccp->cc_alloc++;
2489                         mutex_exit(&ccp->cc_lock);
2490                         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2491                                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2492                                         ASSERT(!(ccp->cc_flags &
2493                                             KMF_DUMPDIVERT));
2494                                         KDI_LOG(cp, kdl_unsafe);
2495                                 }
2496                                 if ((ccp->cc_flags & KMF_BUFTAG) &&
2497                                     kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2498                                     caller()) != 0) {
2499                                         if (kmflag & KM_NOSLEEP)
2500                                                 return (NULL);
2501                                         mutex_enter(&ccp->cc_lock);
2502                                         continue;
2503                                 }
2504                         }
2505                         return (buf);
2506                 }
2507 
2508                 /*
2509                  * The loaded magazine is empty.  If the previously loaded
2510                  * magazine was full, exchange them and try again.
2511                  */
2512                 if (ccp->cc_prounds > 0) {
2513                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2514                         continue;
2515                 }
2516 
2517                 /*
2518                  * Return an alternate buffer at dump time to preserve
2519                  * the heap.
2520                  */
2521                 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2522                         if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2523                                 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2524                                 /* log it so that we can warn about it */
2525                                 KDI_LOG(cp, kdl_unsafe);
2526                         } else {
2527                                 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2528                                     NULL) {
2529                                         mutex_exit(&ccp->cc_lock);
2530                                         return (buf);
2531                                 }
2532                                 break;          /* fall back to slab layer */
2533                         }
2534                 }
2535 
2536                 /*
2537                  * If the magazine layer is disabled, break out now.
2538                  */
2539                 if (ccp->cc_magsize == 0)
2540                         break;
2541 
2542                 /*
2543                  * Try to get a full magazine from the depot.
2544                  */
2545                 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2546                 if (fmp != NULL) {
2547                         if (ccp->cc_ploaded != NULL)
2548                                 kmem_depot_free(cp, &cp->cache_empty,
2549                                     ccp->cc_ploaded);
2550                         kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2551                         continue;
2552                 }
2553 
2554                 /*
2555                  * There are no full magazines in the depot,
2556                  * so fall through to the slab layer.
2557                  */
2558                 break;
2559         }
2560         mutex_exit(&ccp->cc_lock);
2561 
2562         /*
2563          * We couldn't allocate a constructed object from the magazine layer,
2564          * so get a raw buffer from the slab layer and apply its constructor.
2565          */
2566         buf = kmem_slab_alloc(cp, kmflag);
2567 
2568         if (buf == NULL)
2569                 return (NULL);
2570 
2571         if (cp->cache_flags & KMF_BUFTAG) {
2572                 /*
2573                  * Make kmem_cache_alloc_debug() apply the constructor for us.
2574                  */
2575                 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2576                 if (rc != 0) {
2577                         if (kmflag & KM_NOSLEEP)
2578                                 return (NULL);
2579                         /*
                         * kmem_cache_alloc_debug() detected corruption
                         * but didn't panic (kmem_panic <= 0). The only
                         * other nonzero return code is 1, meaning the
                         * constructor failed, which cannot happen for a
                         * sleeping allocation; so rc must be -1 here.
                         * Try again.
2584                          */
2585                         ASSERT(rc == -1);
2586                         return (kmem_cache_alloc(cp, kmflag));
2587                 }
2588                 return (buf);
2589         }
2590 
2591         if (cp->cache_constructor != NULL &&
2592             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2593                 atomic_inc_64(&cp->cache_alloc_fail);
2594                 kmem_slab_free(cp, buf);
2595                 return (NULL);
2596         }
2597 
2598         return (buf);
2599 }
2600 
2601 /*
2602  * The freed argument tells whether or not kmem_cache_free_debug() has already
2603  * been called so that we can avoid the duplicate free error. For example, a
2604  * buffer on a magazine has already been freed by the client but is still
2605  * constructed.
2606  */
2607 static void
2608 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2609 {
2610         if (!freed && (cp->cache_flags & KMF_BUFTAG))
2611                 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2612                         return;
2613 
2614         /*
2615          * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2616          * kmem_cache_free_debug() will have already applied the destructor.
2617          */
2618         if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2619             cp->cache_destructor != NULL) {
2620                 if (cp->cache_flags & KMF_DEADBEEF) {    /* KMF_LITE implied */
2621                         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2622                         *(uint64_t *)buf = btp->bt_redzone;
2623                         cp->cache_destructor(buf, cp->cache_private);
2624                         *(uint64_t *)buf = KMEM_FREE_PATTERN;
2625                 } else {
2626                         cp->cache_destructor(buf, cp->cache_private);
2627                 }
2628         }
2629 
2630         kmem_slab_free(cp, buf);
2631 }
2632 
2633 /*
2634  * Used when there's no room to free a buffer to the per-CPU cache.
2635  * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2636  * caller should try freeing to the per-CPU cache again.
2637  * Note that we don't directly install the magazine in the cpu cache,
2638  * since its state may have changed wildly while the lock was dropped.
2639  */
2640 static int
2641 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2642 {
2643         kmem_magazine_t *emp;
2644         kmem_magtype_t *mtp;
2645 
2646         ASSERT(MUTEX_HELD(&ccp->cc_lock));
2647         ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2648             ((uint_t)ccp->cc_rounds == -1)) &&
2649             ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2650             ((uint_t)ccp->cc_prounds == -1)));
2651 
2652         emp = kmem_depot_alloc(cp, &cp->cache_empty);
2653         if (emp != NULL) {
2654                 if (ccp->cc_ploaded != NULL)
2655                         kmem_depot_free(cp, &cp->cache_full,
2656                             ccp->cc_ploaded);
2657                 kmem_cpu_reload(ccp, emp, 0);
2658                 return (1);
2659         }
2660         /*
2661          * There are no empty magazines in the depot,
2662          * so try to allocate a new one.  We must drop all locks
2663          * across kmem_cache_alloc() because lower layers may
2664          * attempt to allocate from this cache.
2665          */
2666         mtp = cp->cache_magtype;
2667         mutex_exit(&ccp->cc_lock);
2668         emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2669         mutex_enter(&ccp->cc_lock);
2670 
2671         if (emp != NULL) {
2672                 /*
2673                  * We successfully allocated an empty magazine.
2674                  * However, we had to drop ccp->cc_lock to do it,
2675                  * so the cache's magazine size may have changed.
2676                  * If so, free the magazine and try again.
2677                  */
2678                 if (ccp->cc_magsize != mtp->mt_magsize) {
2679                         mutex_exit(&ccp->cc_lock);
2680                         kmem_cache_free(mtp->mt_cache, emp);
2681                         mutex_enter(&ccp->cc_lock);
2682                         return (1);
2683                 }
2684 
2685                 /*
2686                  * We got a magazine of the right size.  Add it to
2687                  * the depot and try the whole dance again.
2688                  */
2689                 kmem_depot_free(cp, &cp->cache_empty, emp);
2690                 return (1);
2691         }
2692 
2693         /*
2694          * We couldn't allocate an empty magazine,
2695          * so fall through to the slab layer.
2696          */
2697         return (0);
2698 }
2699 
2700 /*
2701  * Free a constructed object to cache cp.
2702  */
2703 void
2704 kmem_cache_free(kmem_cache_t *cp, void *buf)
2705 {
2706         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2707 
2708         /*
2709          * The client must not free either of the buffers passed to the move
2710          * callback function.
2711          */
2712         ASSERT(cp->cache_defrag == NULL ||
2713             cp->cache_defrag->kmd_thread != curthread ||
2714             (buf != cp->cache_defrag->kmd_from_buf &&
2715             buf != cp->cache_defrag->kmd_to_buf));
2716 
2717         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2718                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2719                         ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2720                         /* log it so that we can warn about it */
2721                         KDI_LOG(cp, kdl_unsafe);
2722                 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2723                         return;
2724                 }
2725                 if (ccp->cc_flags & KMF_BUFTAG) {
2726                         if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2727                                 return;
2728                 }
2729         }
2730 
2731         mutex_enter(&ccp->cc_lock);
2732         /*
2733          * Any changes to this logic should be reflected in kmem_slab_prefill()
2734          */
2735         for (;;) {
2736                 /*
2737                  * If there's a slot available in the current CPU's
2738                  * loaded magazine, just put the object there and return.
2739                  */
2740                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2741                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2742                         ccp->cc_free++;
2743                         mutex_exit(&ccp->cc_lock);
2744                         return;
2745                 }
2746 
2747                 /*
2748                  * The loaded magazine is full.  If the previously loaded
2749                  * magazine was empty, exchange them and try again.
2750                  */
2751                 if (ccp->cc_prounds == 0) {
2752                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2753                         continue;
2754                 }
2755 
2756                 /*
2757                  * If the magazine layer is disabled, break out now.
2758                  */
2759                 if (ccp->cc_magsize == 0)
2760                         break;
2761 
2762                 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2763                         /*
2764                          * We couldn't free our constructed object to the
2765                          * magazine layer, so apply its destructor and free it
2766                          * to the slab layer.
2767                          */
2768                         break;
2769                 }
2770         }
2771         mutex_exit(&ccp->cc_lock);
2772         kmem_slab_free_constructed(cp, buf, B_TRUE);
2773 }
2774 
2775 static void
2776 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2777 {
2778         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2779         int cache_flags = cp->cache_flags;
2780 
2781         kmem_bufctl_t *next, *head;
2782         size_t nbufs;
2783 
2784         /*
2785          * Completely allocate the newly created slab and put the pre-allocated
2786          * buffers in magazines. Any of the buffers that cannot be put in
2787          * magazines must be returned to the slab.
2788          */
2789         ASSERT(MUTEX_HELD(&cp->cache_lock));
2790         ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2791         ASSERT(cp->cache_constructor == NULL);
2792         ASSERT(sp->slab_cache == cp);
2793         ASSERT(sp->slab_refcnt == 1);
2794         ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2795         ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2796 
2797         head = sp->slab_head;
2798         nbufs = (sp->slab_chunks - sp->slab_refcnt);
2799         sp->slab_head = NULL;
2800         sp->slab_refcnt += nbufs;
2801         cp->cache_bufslab -= nbufs;
2802         cp->cache_slab_alloc += nbufs;
2803         list_insert_head(&cp->cache_complete_slabs, sp);
2804         cp->cache_complete_slab_count++;
2805         mutex_exit(&cp->cache_lock);
2806         mutex_enter(&ccp->cc_lock);
2807 
2808         while (head != NULL) {
2809                 void *buf = KMEM_BUF(cp, head);
2810                 /*
2811                  * If there's a slot available in the current CPU's
2812                  * loaded magazine, just put the object there and
2813                  * continue.
2814                  */
2815                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2816                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2817                             buf;
2818                         ccp->cc_free++;
2819                         nbufs--;
2820                         head = head->bc_next;
2821                         continue;
2822                 }
2823 
2824                 /*
2825                  * The loaded magazine is full.  If the previously
2826                  * loaded magazine was empty, exchange them and try
2827                  * again.
2828                  */
2829                 if (ccp->cc_prounds == 0) {
2830                         kmem_cpu_reload(ccp, ccp->cc_ploaded,
2831                             ccp->cc_prounds);
2832                         continue;
2833                 }
2834 
2835                 /*
2836                  * If the magazine layer is disabled, break out now.
2837                  */
2838 
2839                 if (ccp->cc_magsize == 0) {
2840                         break;
2841                 }
2842 
2843                 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2844                         break;
2845         }
2846         mutex_exit(&ccp->cc_lock);
2847         if (nbufs != 0) {
2848                 ASSERT(head != NULL);
2849 
2850                 /*
2851                  * If there was a failure, return remaining objects to
                 * the slab.
2853                  */
2854                 while (head != NULL) {
2855                         ASSERT(nbufs != 0);
2856                         next = head->bc_next;
2857                         head->bc_next = NULL;
2858                         kmem_slab_free(cp, KMEM_BUF(cp, head));
2859                         head = next;
2860                         nbufs--;
2861                 }
2862         }
2863         ASSERT(head == NULL);
2864         ASSERT(nbufs == 0);
2865         mutex_enter(&cp->cache_lock);
2866 }
2867 
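/*
 * Allocate size bytes and zero them.  For small sizes the cache lookup
 * is done inline so that the buftag bookkeeping (the redzone byte and
 * the encoded allocation size) matches what kmem_alloc() would have
 * written for the same request.
 */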
2868 void *
2869 kmem_zalloc(size_t size, int kmflag)
2870 {
2871         size_t index;
2872         void *buf;
2873 
2874         if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2875                 kmem_cache_t *cp = kmem_alloc_table[index];
2876                 buf = kmem_cache_alloc(cp, kmflag);
2877                 if (buf != NULL) {
2878                         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2879                                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2880                                 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2881                                 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2882 
2883                                 if (cp->cache_flags & KMF_LITE) {
2884                                         KMEM_BUFTAG_LITE_ENTER(btp,
2885                                             kmem_lite_count, caller());
2886                                 }
2887                         }
2888                         bzero(buf, size);
2889                 }
2890         } else {
2891                 buf = kmem_alloc(size, kmflag);
2892                 if (buf != NULL)
2893                         bzero(buf, size);
2894         }
2895         return (buf);
2896 }
2897 
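/*
 * kmem_alloc() dispatches on size: small requests are served by a fixed
 * table of caches in KMEM_ALIGN-sized bins, medium requests by a coarser
 * table in KMEM_BIG_SHIFT-sized bins, and anything larger goes straight
 * to the oversize vmem arena.  As a sketch (assuming KMEM_ALIGN_SHIFT is
 * 3, i.e. 8-byte bins), a 40-byte request yields index (40 - 1) >> 3 == 4
 * and is served by kmem_alloc_table[4].
 */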
2898 void *
2899 kmem_alloc(size_t size, int kmflag)
2900 {
2901         size_t index;
2902         kmem_cache_t *cp;
2903         void *buf;
2904 
2905         if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2906                 cp = kmem_alloc_table[index];
2907                 /* fall through to kmem_cache_alloc() */
2908 
2909         } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2910             kmem_big_alloc_table_max) {
2911                 cp = kmem_big_alloc_table[index];
2912                 /* fall through to kmem_cache_alloc() */
2913 
2914         } else {
2915                 if (size == 0)
2916                         return (NULL);
2917 
2918                 buf = vmem_alloc(kmem_oversize_arena, size,
2919                     kmflag & KM_VMFLAGS);
2920                 if (buf == NULL)
2921                         kmem_log_event(kmem_failure_log, NULL, NULL,
2922                             (void *)size);
2923                 else if (KMEM_DUMP(kmem_slab_cache)) {
2924                         /* stats for dump intercept */
2925                         kmem_dump_oversize_allocs++;
2926                         if (size > kmem_dump_oversize_max)
2927                                 kmem_dump_oversize_max = size;
2928                 }
2929                 return (buf);
2930         }
2931 
2932         buf = kmem_cache_alloc(cp, kmflag);
2933         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2934                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2935                 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2936                 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2937 
2938                 if (cp->cache_flags & KMF_LITE) {
2939                         KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2940                 }
2941         }
2942         return (buf);
2943 }
2944 
2945 void
2946 kmem_free(void *buf, size_t size)
2947 {
2948         size_t index;
2949         kmem_cache_t *cp;
2950 
2951         if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2952                 cp = kmem_alloc_table[index];
2953                 /* fall through to kmem_cache_free() */
2954 
2955         } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2956             kmem_big_alloc_table_max) {
2957                 cp = kmem_big_alloc_table[index];
2958                 /* fall through to kmem_cache_free() */
2959 
2960         } else {
2961                 EQUIV(buf == NULL, size == 0);
2962                 if (buf == NULL && size == 0)
2963                         return;
2964                 vmem_free(kmem_oversize_arena, buf, size);
2965                 return;
2966         }
2967 
2968         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2969                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2970                 uint32_t *ip = (uint32_t *)btp;
2971                 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2972                         if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2973                                 kmem_error(KMERR_DUPFREE, cp, buf);
2974                                 return;
2975                         }
2976                         if (KMEM_SIZE_VALID(ip[1])) {
2977                                 ip[0] = KMEM_SIZE_ENCODE(size);
2978                                 kmem_error(KMERR_BADSIZE, cp, buf);
2979                         } else {
2980                                 kmem_error(KMERR_REDZONE, cp, buf);
2981                         }
2982                         return;
2983                 }
2984                 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2985                         kmem_error(KMERR_REDZONE, cp, buf);
2986                         return;
2987                 }
2988                 btp->bt_redzone = KMEM_REDZONE_PATTERN;
2989                 if (cp->cache_flags & KMF_LITE) {
2990                         KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2991                             caller());
2992                 }
2993         }
2994         kmem_cache_free(cp, buf);
2995 }
2996 
2997 void *
2998 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2999 {
3000         size_t realsize = size + vmp->vm_quantum;
3001         void *addr;
3002 
3003         /*
3004          * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
3005          * vm_quantum will cause integer wraparound.  Check for this, and
3006          * blow off the firewall page in this case.  Note that such a
3007          * giant allocation (the entire kernel address space) can never
3008          * be satisfied, so it will either fail immediately (VM_NOSLEEP)
3009          * or sleep forever (VM_SLEEP).  Thus, there is no need for a
3010          * corresponding check in kmem_firewall_va_free().
3011          */
3012         if (realsize < size)
3013                 realsize = size;
3014 
3015         /*
3016          * While boot still owns resource management, make sure that this
3017          * redzone virtual address allocation is properly accounted for in
 * OBP's "virtual-memory" "available" lists because we're
3019          * effectively claiming them for a red zone.  If we don't do this,
3020          * the available lists become too fragmented and too large for the
3021          * current boot/kernel memory list interface.
3022          */
3023         addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3024 
3025         if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3026                 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3027 
3028         return (addr);
3029 }
3030 
3031 void
3032 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3033 {
3034         ASSERT((kvseg.s_base == NULL ?
3035             va_to_pfn((char *)addr + size) :
3036             hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3037 
3038         vmem_free(vmp, addr, size + vmp->vm_quantum);
3039 }
3040 
3041 /*
 * Try to allocate at least `size' bytes of memory without sleeping or
 * panicking. Return the actual allocated size in `asize'. If every
 * attempt fails, make a final allocation with sleeping or panicking
 * allowed.
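 *
 * For example (assuming KMEM_ALIGN is 8), a 100-byte request first
 * tries 104 bytes KM_NOSLEEP, then 112, 120, ... up to PAGESIZE;
 * only if all of those fail does it retry 104 bytes with the caller's
 * original kmflag, which may then sleep or panic.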
3045  */
3046 void *
3047 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3048 {
3049         void *p;
3050 
3051         *asize = P2ROUNDUP(size, KMEM_ALIGN);
3052         do {
3053                 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3054                 if (p != NULL)
3055                         return (p);
3056                 *asize += KMEM_ALIGN;
3057         } while (*asize <= PAGESIZE);
3058 
3059         *asize = P2ROUNDUP(size, KMEM_ALIGN);
3060         return (kmem_alloc(*asize, kmflag));
3061 }
3062 
3063 /*
3064  * Reclaim all unused memory from a cache.
3065  */
3066 static void
3067 kmem_cache_reap(kmem_cache_t *cp)
3068 {
3069         ASSERT(taskq_member(kmem_taskq, curthread));
3070         cp->cache_reap++;
3071 
3072         /*
3073          * Ask the cache's owner to free some memory if possible.
3074          * The idea is to handle things like the inode cache, which
3075          * typically sits on a bunch of memory that it doesn't truly
3076          * *need*.  Reclaim policy is entirely up to the owner; this
3077          * callback is just an advisory plea for help.
3078          */
3079         if (cp->cache_reclaim != NULL) {
3080                 long delta;
3081 
3082                 /*
3083                  * Reclaimed memory should be reapable (not included in the
3084                  * depot's working set).
3085                  */
3086                 delta = cp->cache_full.ml_total;
3087                 cp->cache_reclaim(cp->cache_private);
3088                 delta = cp->cache_full.ml_total - delta;
3089                 if (delta > 0) {
3090                         mutex_enter(&cp->cache_depot_lock);
3091                         cp->cache_full.ml_reaplimit += delta;
3092                         cp->cache_full.ml_min += delta;
3093                         mutex_exit(&cp->cache_depot_lock);
3094                 }
3095         }
3096 
3097         kmem_depot_ws_reap(cp);
3098 
3099         if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3100                 kmem_cache_defrag(cp);
3101         }
3102 }
3103 
3104 static void
3105 kmem_reap_timeout(void *flag_arg)
3106 {
3107         uint32_t *flag = (uint32_t *)flag_arg;
3108 
3109         ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3110         *flag = 0;
3111 }
3112 
3113 static void
3114 kmem_reap_done(void *flag)
3115 {
3116         if (!callout_init_done) {
3117                 /* can't schedule a timeout at this point */
3118                 kmem_reap_timeout(flag);
3119         } else {
3120                 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3121         }
3122 }
3123 
3124 static void
3125 kmem_reap_start(void *flag)
3126 {
3127         ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3128 
3129         if (flag == &kmem_reaping) {
3130                 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3131                 /*
		 * If we have segkp under the heap, also reap the segkp cache.
3133                  */
3134                 if (segkp_fromheap)
3135                         segkp_cache_free();
3136         }
3137         else
3138                 kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3139 
3140         /*
3141          * We use taskq_dispatch() to schedule a timeout to clear
3142          * the flag so that kmem_reap() becomes self-throttling:
3143          * we won't reap again until the current reap completes *and*
3144          * at least kmem_reap_interval ticks have elapsed.
3145          */
3146         if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3147                 kmem_reap_done(flag);
3148 }
3149 
3150 static void
3151 kmem_reap_common(void *flag_arg)
3152 {
3153         uint32_t *flag = (uint32_t *)flag_arg;
3154 
3155         if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3156             atomic_cas_32(flag, 0, 1) != 0)
3157                 return;
3158 
3159         /*
3160          * It may not be kosher to do memory allocation when a reap is called
3161          * (for example, if vmem_populate() is in the call chain).  So we
3162          * start the reap going with a TQ_NOALLOC dispatch.  If the dispatch
3163          * fails, we reset the flag, and the next reap will try again.
3164          */
3165         if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3166                 *flag = 0;
3167 }
3168 
3169 /*
3170  * Reclaim all unused memory from all caches.  Called from the VM system
3171  * when memory gets tight.
3172  */
3173 void
3174 kmem_reap(void)
3175 {
3176         kmem_reap_common(&kmem_reaping);
3177 }
3178 
3179 /*
3180  * Reclaim all unused memory from identifier arenas, called when a vmem
 * arena not backed by memory is exhausted.  Since reaping memory-backed caches
3182  * cannot help with identifier exhaustion, we avoid both a large amount of
3183  * work and unwanted side-effects from reclaim callbacks.
3184  */
3185 void
3186 kmem_reap_idspace(void)
3187 {
3188         kmem_reap_common(&kmem_reaping_idspace);
3189 }
3190 
3191 /*
3192  * Purge all magazines from a cache and set its magazine limit to zero.
3193  * All calls are serialized by the kmem_taskq lock, except for the final
3194  * call from kmem_cache_destroy().
3195  */
3196 static void
3197 kmem_cache_magazine_purge(kmem_cache_t *cp)
3198 {
3199         kmem_cpu_cache_t *ccp;
3200         kmem_magazine_t *mp, *pmp;
3201         int rounds, prounds, cpu_seqid;
3202 
3203         ASSERT(!list_link_active(&cp->cache_link) ||
3204             taskq_member(kmem_taskq, curthread));
3205         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3206 
3207         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3208                 ccp = &cp->cache_cpu[cpu_seqid];
3209 
3210                 mutex_enter(&ccp->cc_lock);
3211                 mp = ccp->cc_loaded;
3212                 pmp = ccp->cc_ploaded;
3213                 rounds = ccp->cc_rounds;
3214                 prounds = ccp->cc_prounds;
3215                 ccp->cc_loaded = NULL;
3216                 ccp->cc_ploaded = NULL;
3217                 ccp->cc_rounds = -1;
3218                 ccp->cc_prounds = -1;
3219                 ccp->cc_magsize = 0;
3220                 mutex_exit(&ccp->cc_lock);
3221 
3222                 if (mp)
3223                         kmem_magazine_destroy(cp, mp, rounds);
3224                 if (pmp)
3225                         kmem_magazine_destroy(cp, pmp, prounds);
3226         }
3227 
3228         kmem_depot_ws_zero(cp);
3229         kmem_depot_ws_reap(cp);
3230 }
3231 
3232 /*
3233  * Enable per-cpu magazines on a cache.
3234  */
3235 static void
3236 kmem_cache_magazine_enable(kmem_cache_t *cp)
3237 {
3238         int cpu_seqid;
3239 
3240         if (cp->cache_flags & KMF_NOMAGAZINE)
3241                 return;
3242 
3243         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3244                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3245                 mutex_enter(&ccp->cc_lock);
3246                 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3247                 mutex_exit(&ccp->cc_lock);
3248         }
3249 
3250 }
3251 
3252 /*
3253  * Allow our caller to determine if there are running reaps.
3254  *
 * This call is very conservative and may return B_TRUE even when no
 * reaping is in progress. If it returns B_FALSE, then reaping is
 * definitely not in progress.
3258  */
3259 boolean_t
3260 kmem_cache_reap_active(void)
3261 {
3262         return (!taskq_empty(kmem_taskq));
3263 }
3264 
3265 /*
3266  * Reap (almost) everything soon.
3267  *
3268  * Note: this does not wait for the reap-tasks to complete. Caller
3269  * should use kmem_cache_reap_active() (above) and/or moderation to
3270  * avoid scheduling too many reap-tasks.
3271  */
3272 void
3273 kmem_cache_reap_soon(kmem_cache_t *cp)
3274 {
3275         ASSERT(list_link_active(&cp->cache_link));
3276 
3277         kmem_depot_ws_zero(cp);
3278 
3279         (void) taskq_dispatch(kmem_taskq,
3280             (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3281 }
3282 
3283 /*
3284  * Recompute a cache's magazine size.  The trade-off is that larger magazines
3285  * provide a higher transfer rate with the depot, while smaller magazines
3286  * reduce memory consumption.  Magazine resizing is an expensive operation;
3287  * it should not be done frequently.
3288  *
3289  * Changes to the magazine size are serialized by the kmem_taskq lock.
3290  *
3291  * Note: at present this only grows the magazine size.  It might be useful
3292  * to allow shrinkage too.
3293  */
3294 static void
3295 kmem_cache_magazine_resize(kmem_cache_t *cp)
3296 {
3297         kmem_magtype_t *mtp = cp->cache_magtype;
3298 
3299         ASSERT(taskq_member(kmem_taskq, curthread));
3300 
3301         if (cp->cache_chunksize < mtp->mt_maxbuf) {
3302                 kmem_cache_magazine_purge(cp);
3303                 mutex_enter(&cp->cache_depot_lock);
3304                 cp->cache_magtype = ++mtp;
3305                 cp->cache_depot_contention_prev =
3306                     cp->cache_depot_contention + INT_MAX;
3307                 mutex_exit(&cp->cache_depot_lock);
3308                 kmem_cache_magazine_enable(cp);
3309         }
3310 }
3311 
3312 /*
3313  * Rescale a cache's hash table, so that the table size is roughly the
3314  * cache size.  We want the average lookup time to be extremely small.
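 *
 * For example, with cache_buftotal == 1000: 3 * 1000 + 4 == 3004,
 * highbit(3004) == 12, and the new table size is 1 << (12 - 2) == 1024
 * buckets -- roughly one hash bucket per allocated buffer.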
3315  */
3316 static void
3317 kmem_hash_rescale(kmem_cache_t *cp)
3318 {
3319         kmem_bufctl_t **old_table, **new_table, *bcp;
3320         size_t old_size, new_size, h;
3321 
3322         ASSERT(taskq_member(kmem_taskq, curthread));
3323 
3324         new_size = MAX(KMEM_HASH_INITIAL,
3325             1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3326         old_size = cp->cache_hash_mask + 1;
3327 
3328         if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3329                 return;
3330 
3331         new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3332             VM_NOSLEEP);
3333         if (new_table == NULL)
3334                 return;
3335         bzero(new_table, new_size * sizeof (void *));
3336 
3337         mutex_enter(&cp->cache_lock);
3338 
3339         old_size = cp->cache_hash_mask + 1;
3340         old_table = cp->cache_hash_table;
3341 
3342         cp->cache_hash_mask = new_size - 1;
3343         cp->cache_hash_table = new_table;
3344         cp->cache_rescale++;
3345 
3346         for (h = 0; h < old_size; h++) {
3347                 bcp = old_table[h];
3348                 while (bcp != NULL) {
3349                         void *addr = bcp->bc_addr;
3350                         kmem_bufctl_t *next_bcp = bcp->bc_next;
3351                         kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3352                         bcp->bc_next = *hash_bucket;
3353                         *hash_bucket = bcp;
3354                         bcp = next_bcp;
3355                 }
3356         }
3357 
3358         mutex_exit(&cp->cache_lock);
3359 
3360         vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3361 }
3362 
3363 /*
3364  * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3365  * update, magazine resizing, and slab consolidation.
3366  */
3367 static void
3368 kmem_cache_update(kmem_cache_t *cp)
3369 {
3370         int need_hash_rescale = 0;
3371         int need_magazine_resize = 0;
3372 
3373         ASSERT(MUTEX_HELD(&kmem_cache_lock));
3374 
3375         /*
3376          * If the cache has become much larger or smaller than its hash table,
3377          * fire off a request to rescale the hash table.
3378          */
3379         mutex_enter(&cp->cache_lock);
3380 
3381         if ((cp->cache_flags & KMF_HASH) &&
3382             (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3383             (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3384             cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3385                 need_hash_rescale = 1;
3386 
3387         mutex_exit(&cp->cache_lock);
3388 
3389         /*
3390          * Update the depot working set statistics.
3391          */
3392         kmem_depot_ws_update(cp);
3393 
3394         /*
3395          * If there's a lot of contention in the depot,
3396          * increase the magazine size.
3397          */
3398         mutex_enter(&cp->cache_depot_lock);
3399 
3400         if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3401             (int)(cp->cache_depot_contention -
3402             cp->cache_depot_contention_prev) > kmem_depot_contention)
3403                 need_magazine_resize = 1;
3404 
3405         cp->cache_depot_contention_prev = cp->cache_depot_contention;
3406 
3407         mutex_exit(&cp->cache_depot_lock);
3408 
3409         if (need_hash_rescale)
3410                 (void) taskq_dispatch(kmem_taskq,
3411                     (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3412 
3413         if (need_magazine_resize)
3414                 (void) taskq_dispatch(kmem_taskq,
3415                     (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3416 
3417         if (cp->cache_defrag != NULL)
3418                 (void) taskq_dispatch(kmem_taskq,
3419                     (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3420 }
3421 
3422 static void kmem_update(void *);
3423 
3424 static void
3425 kmem_update_timeout(void *dummy)
3426 {
3427         (void) timeout(kmem_update, dummy, kmem_reap_interval);
3428 }
3429 
3430 static void
3431 kmem_update(void *dummy)
3432 {
3433         kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3434 
3435         /*
3436          * We use taskq_dispatch() to reschedule the timeout so that
3437          * kmem_update() becomes self-throttling: it won't schedule
3438          * new tasks until all previous tasks have completed.
3439          */
3440         if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
3441                 kmem_update_timeout(NULL);
3442 }
3443 
3444 static int
3445 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3446 {
3447         struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3448         kmem_cache_t *cp = ksp->ks_private;
3449         uint64_t cpu_buf_avail;
3450         uint64_t buf_avail = 0;
3451         int cpu_seqid;
3452         long reap;
3453 
3454         ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3455 
3456         if (rw == KSTAT_WRITE)
3457                 return (EACCES);
3458 
3459         mutex_enter(&cp->cache_lock);
3460 
3461         kmcp->kmc_alloc_fail.value.ui64              = cp->cache_alloc_fail;
3462         kmcp->kmc_alloc.value.ui64           = cp->cache_slab_alloc;
3463         kmcp->kmc_free.value.ui64            = cp->cache_slab_free;
3464         kmcp->kmc_slab_alloc.value.ui64              = cp->cache_slab_alloc;
3465         kmcp->kmc_slab_free.value.ui64               = cp->cache_slab_free;
3466 
3467         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3468                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3469 
3470                 mutex_enter(&ccp->cc_lock);
3471 
3472                 cpu_buf_avail = 0;
3473                 if (ccp->cc_rounds > 0)
3474                         cpu_buf_avail += ccp->cc_rounds;
3475                 if (ccp->cc_prounds > 0)
3476                         cpu_buf_avail += ccp->cc_prounds;
3477 
3478                 kmcp->kmc_alloc.value.ui64   += ccp->cc_alloc;
3479                 kmcp->kmc_free.value.ui64    += ccp->cc_free;
3480                 buf_avail                       += cpu_buf_avail;
3481 
3482                 mutex_exit(&ccp->cc_lock);
3483         }
3484 
3485         mutex_enter(&cp->cache_depot_lock);
3486 
3487         kmcp->kmc_depot_alloc.value.ui64     = cp->cache_full.ml_alloc;
3488         kmcp->kmc_depot_free.value.ui64              = cp->cache_empty.ml_alloc;
3489         kmcp->kmc_depot_contention.value.ui64        = cp->cache_depot_contention;
3490         kmcp->kmc_full_magazines.value.ui64  = cp->cache_full.ml_total;
3491         kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3492         kmcp->kmc_magazine_size.value.ui64   =
3493             (cp->cache_flags & KMF_NOMAGAZINE) ?
3494             0 : cp->cache_magtype->mt_magsize;
3495 
3496         kmcp->kmc_alloc.value.ui64           += cp->cache_full.ml_alloc;
3497         kmcp->kmc_free.value.ui64            += cp->cache_empty.ml_alloc;
3498         buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3499 
3500         reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3501         reap = MIN(reap, cp->cache_full.ml_total);
3502 
3503         mutex_exit(&cp->cache_depot_lock);
3504 
3505         kmcp->kmc_buf_size.value.ui64        = cp->cache_bufsize;
3506         kmcp->kmc_align.value.ui64   = cp->cache_align;
3507         kmcp->kmc_chunk_size.value.ui64      = cp->cache_chunksize;
3508         kmcp->kmc_slab_size.value.ui64       = cp->cache_slabsize;
3509         kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3510         buf_avail += cp->cache_bufslab;
3511         kmcp->kmc_buf_avail.value.ui64       = buf_avail;
3512         kmcp->kmc_buf_inuse.value.ui64       = cp->cache_buftotal - buf_avail;
3513         kmcp->kmc_buf_total.value.ui64       = cp->cache_buftotal;
3514         kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3515         kmcp->kmc_slab_create.value.ui64     = cp->cache_slab_create;
3516         kmcp->kmc_slab_destroy.value.ui64    = cp->cache_slab_destroy;
3517         kmcp->kmc_hash_size.value.ui64       = (cp->cache_flags & KMF_HASH) ?
3518             cp->cache_hash_mask + 1 : 0;
3519         kmcp->kmc_hash_lookup_depth.value.ui64       = cp->cache_lookup_depth;
3520         kmcp->kmc_hash_rescale.value.ui64    = cp->cache_rescale;
3521         kmcp->kmc_vmem_source.value.ui64     = cp->cache_arena->vm_id;
3522         kmcp->kmc_reap.value.ui64    = cp->cache_reap;
3523 
3524         if (cp->cache_defrag == NULL) {
3525                 kmcp->kmc_move_callbacks.value.ui64  = 0;
3526                 kmcp->kmc_move_yes.value.ui64                = 0;
3527                 kmcp->kmc_move_no.value.ui64         = 0;
3528                 kmcp->kmc_move_later.value.ui64              = 0;
3529                 kmcp->kmc_move_dont_need.value.ui64  = 0;
3530                 kmcp->kmc_move_dont_know.value.ui64  = 0;
3531                 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3532                 kmcp->kmc_move_slabs_freed.value.ui64        = 0;
3533                 kmcp->kmc_defrag.value.ui64          = 0;
3534                 kmcp->kmc_scan.value.ui64            = 0;
3535                 kmcp->kmc_move_reclaimable.value.ui64        = 0;
3536         } else {
3537                 int64_t reclaimable;
3538 
3539                 kmem_defrag_t *kd = cp->cache_defrag;
3540                 kmcp->kmc_move_callbacks.value.ui64  = kd->kmd_callbacks;
3541                 kmcp->kmc_move_yes.value.ui64                = kd->kmd_yes;
3542                 kmcp->kmc_move_no.value.ui64         = kd->kmd_no;
3543                 kmcp->kmc_move_later.value.ui64              = kd->kmd_later;
3544                 kmcp->kmc_move_dont_need.value.ui64  = kd->kmd_dont_need;
3545                 kmcp->kmc_move_dont_know.value.ui64  = kd->kmd_dont_know;
3546                 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3547                 kmcp->kmc_move_slabs_freed.value.ui64        = kd->kmd_slabs_freed;
3548                 kmcp->kmc_defrag.value.ui64          = kd->kmd_defrags;
3549                 kmcp->kmc_scan.value.ui64            = kd->kmd_scans;
3550 
3551                 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3552                 reclaimable = MAX(reclaimable, 0);
3553                 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3554                 kmcp->kmc_move_reclaimable.value.ui64        = reclaimable;
3555         }
3556 
3557         mutex_exit(&cp->cache_lock);
3558         return (0);
3559 }
3560 
3561 /*
3562  * Return a named statistic about a particular cache.
3563  * This shouldn't be called very often, so it's currently designed for
3564  * simplicity (leverages existing kstat support) rather than efficiency.
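 *
 * For example, kmem_cache_stat(cp, "buf_inuse") would return the number
 * of buffers currently allocated from the cache, assuming "buf_inuse"
 * is the kstat name backing kmc_buf_inuse above.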
3565  */
3566 uint64_t
3567 kmem_cache_stat(kmem_cache_t *cp, char *name)
3568 {
3569         int i;
3570         kstat_t *ksp = cp->cache_kstat;
3571         kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3572         uint64_t value = 0;
3573 
3574         if (ksp != NULL) {
3575                 mutex_enter(&kmem_cache_kstat_lock);
3576                 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3577                 for (i = 0; i < ksp->ks_ndata; i++) {
3578                         if (strcmp(knp[i].name, name) == 0) {
3579                                 value = knp[i].value.ui64;
3580                                 break;
3581                         }
3582                 }
3583                 mutex_exit(&kmem_cache_kstat_lock);
3584         }
3585         return (value);
3586 }
3587 
3588 /*
3589  * Return an estimate of currently available kernel heap memory.
 * On 32-bit systems, physical memory may exceed virtual memory, so
 * we just truncate the result at 1GB.
3592  */
3593 size_t
3594 kmem_avail(void)
3595 {
3596         spgcnt_t rmem = availrmem - tune.t_minarmem;
3597         spgcnt_t fmem = freemem - minfree;
3598 
3599         return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3600             1 << (30 - PAGESHIFT))));
3601 }
3602 
3603 /*
 * Return the maximum amount of memory that is (in theory) allocatable
 * from the heap. This may be used as an estimate only, since there
 * is no guarantee that this space will still be available when an
 * allocation request is made, nor that the space can be allocated in
 * one contiguous request, due to kernel heap fragmentation.
3609  */
3610 size_t
3611 kmem_maxavail(void)
3612 {
3613         spgcnt_t pmem = availrmem - tune.t_minarmem;
3614         spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3615 
3616         return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3617 }
3618 
3619 /*
3620  * Indicate whether memory-intensive kmem debugging is enabled.
3621  */
3622 int
3623 kmem_debugging(void)
3624 {
3625         return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3626 }
3627 
3628 /* binning function, sorts finely at the two extremes */
3629 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift)                          \
3630         ((((sp)->slab_refcnt <= (binshift)) ||                            \
3631             (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift)))   \
3632             ? -(sp)->slab_refcnt                                     \
3633             : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3634 
3635 /*
3636  * Minimizing the number of partial slabs on the freelist minimizes
3637  * fragmentation (the ratio of unused buffers held by the slab layer). There are
3638  * two ways to get a slab off of the freelist: 1) free all the buffers on the
3639  * slab, and 2) allocate all the buffers on the slab. It follows that we want
3640  * the most-used slabs at the front of the list where they have the best chance
3641  * of being completely allocated, and the least-used slabs at a safe distance
3642  * from the front to improve the odds that the few remaining buffers will all be
3643  * freed before another allocation can tie up the slab. For that reason a slab
 * with a higher slab_refcnt sorts less than a slab with a lower
3645  * slab_refcnt.
3646  *
3647  * However, if a slab has at least one buffer that is deemed unfreeable, we
3648  * would rather have that slab at the front of the list regardless of
3649  * slab_refcnt, since even one unfreeable buffer makes the entire slab
3650  * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3651  * callback, the slab is marked unfreeable for as long as it remains on the
3652  * freelist.
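 *
 * For example, with binshift == 4: a busy slab with slab_refcnt == 100
 * (and more than 4 free chunks) weighs -(4 + (100 >> 4)) == -10, while
 * a nearly empty slab with slab_refcnt == 3 weighs -3, so the busier
 * slab sorts toward the front of the freelist.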
3653  */
3654 static int
3655 kmem_partial_slab_cmp(const void *p0, const void *p1)
3656 {
3657         const kmem_cache_t *cp;
3658         const kmem_slab_t *s0 = p0;
3659         const kmem_slab_t *s1 = p1;
3660         int w0, w1;
3661         size_t binshift;
3662 
3663         ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3664         ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3665         ASSERT(s0->slab_cache == s1->slab_cache);
3666         cp = s1->slab_cache;
3667         ASSERT(MUTEX_HELD(&cp->cache_lock));
3668         binshift = cp->cache_partial_binshift;
3669 
3670         /* weight of first slab */
3671         w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3672         if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3673                 w0 -= cp->cache_maxchunks;
3674         }
3675 
3676         /* weight of second slab */
3677         w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3678         if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3679                 w1 -= cp->cache_maxchunks;
3680         }
3681 
3682         if (w0 < w1)
3683                 return (-1);
3684         if (w0 > w1)
3685                 return (1);
3686 
3687         /* compare pointer values */
3688         if ((uintptr_t)s0 < (uintptr_t)s1)
3689                 return (-1);
3690         if ((uintptr_t)s0 > (uintptr_t)s1)
3691                 return (1);
3692 
3693         return (0);
3694 }
3695 
3696 /*
3697  * It must be valid to call the destructor (if any) on a newly created object.
3698  * That is, the constructor (if any) must leave the object in a valid state for
3699  * the destructor.
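 *
 * For illustration, a hypothetical client might create its cache like
 * this (foo_t, foo_constructor and foo_destructor are the client's own;
 * NULL is legal for the constructor, destructor and reclaim callbacks):
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_constructor, foo_destructor, NULL, NULL, NULL, 0);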
3700  */
3701 kmem_cache_t *
3702 kmem_cache_create(
3703         char *name,             /* descriptive name for this cache */
3704         size_t bufsize,         /* size of the objects it manages */
3705         size_t align,           /* required object alignment */
3706         int (*constructor)(void *, void *, int), /* object constructor */
3707         void (*destructor)(void *, void *),     /* object destructor */
3708         void (*reclaim)(void *), /* memory reclaim callback */
3709         void *private,          /* pass-thru arg for constr/destr/reclaim */
3710         vmem_t *vmp,            /* vmem source for slab allocation */
3711         int cflags)             /* cache creation flags */
3712 {
3713         int cpu_seqid;
3714         size_t chunksize;
3715         kmem_cache_t *cp;
3716         kmem_magtype_t *mtp;
3717         size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3718 
3719 #ifdef  DEBUG
3720         /*
3721          * Cache names should conform to the rules for valid C identifiers
3722          */
3723         if (!strident_valid(name)) {
3724                 cmn_err(CE_CONT,
3725                     "kmem_cache_create: '%s' is an invalid cache name\n"
3726                     "cache names must conform to the rules for "
3727                     "C identifiers\n", name);
3728         }
3729 #endif  /* DEBUG */
3730 
3731         if (vmp == NULL)
3732                 vmp = kmem_default_arena;
3733 
3734         /*
3735          * If this kmem cache has an identifier vmem arena as its source, mark
3736          * it such to allow kmem_reap_idspace().
3737          */
3738         ASSERT(!(cflags & KMC_IDENTIFIER));   /* consumer should not set this */
3739         if (vmp->vm_cflags & VMC_IDENTIFIER)
3740                 cflags |= KMC_IDENTIFIER;
3741 
3742         /*
3743          * Get a kmem_cache structure.  We arrange that cp->cache_cpu[]
3744          * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3745          * false sharing of per-CPU data.
3746          */
3747         cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3748             P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3749         bzero(cp, csize);
3750         list_link_init(&cp->cache_link);
3751 
3752         if (align == 0)
3753                 align = KMEM_ALIGN;
3754 
3755         /*
3756          * If we're not at least KMEM_ALIGN aligned, we can't use free
3757          * memory to hold bufctl information (because we can't safely
3758          * perform word loads and stores on it).
3759          */
3760         if (align < KMEM_ALIGN)
3761                 cflags |= KMC_NOTOUCH;
3762 
3763         if (!ISP2(align) || align > vmp->vm_quantum)
3764                 panic("kmem_cache_create: bad alignment %lu", align);
3765 
3766         mutex_enter(&kmem_flags_lock);
3767         if (kmem_flags & KMF_RANDOMIZE)
3768                 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3769                     KMF_RANDOMIZE;
3770         cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3771         mutex_exit(&kmem_flags_lock);
3772 
3773         /*
3774          * Make sure all the various flags are reasonable.
3775          */
3776         ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3777 
3778         if (cp->cache_flags & KMF_LITE) {
3779                 if (bufsize >= kmem_lite_minsize &&
3780                     align <= kmem_lite_maxalign &&
3781                     P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3782                         cp->cache_flags |= KMF_BUFTAG;
3783                         cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3784                 } else {
3785                         cp->cache_flags &= ~KMF_DEBUG;
3786                 }
3787         }
3788 
3789         if (cp->cache_flags & KMF_DEADBEEF)
3790                 cp->cache_flags |= KMF_REDZONE;
3791 
3792         if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3793                 cp->cache_flags |= KMF_NOMAGAZINE;
3794 
3795         if (cflags & KMC_NODEBUG)
3796                 cp->cache_flags &= ~KMF_DEBUG;
3797 
3798         if (cflags & KMC_NOTOUCH)
3799                 cp->cache_flags &= ~KMF_TOUCH;
3800 
3801         if (cflags & KMC_PREFILL)
3802                 cp->cache_flags |= KMF_PREFILL;
3803 
3804         if (cflags & KMC_NOHASH)
3805                 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3806 
3807         if (cflags & KMC_NOMAGAZINE)
3808                 cp->cache_flags |= KMF_NOMAGAZINE;
3809 
3810         if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3811                 cp->cache_flags |= KMF_REDZONE;
3812 
3813         if (!(cp->cache_flags & KMF_AUDIT))
3814                 cp->cache_flags &= ~KMF_CONTENTS;
3815 
3816         if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3817             !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3818                 cp->cache_flags |= KMF_FIREWALL;
3819 
3820         if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3821                 cp->cache_flags &= ~KMF_FIREWALL;
3822 
3823         if (cp->cache_flags & KMF_FIREWALL) {
3824                 cp->cache_flags &= ~KMF_BUFTAG;
3825                 cp->cache_flags |= KMF_NOMAGAZINE;
3826                 ASSERT(vmp == kmem_default_arena);
3827                 vmp = kmem_firewall_arena;
3828         }
3829 
3830         /*
3831          * Set cache properties.
3832          */
3833         (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3834         strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3835         cp->cache_bufsize = bufsize;
3836         cp->cache_align = align;
3837         cp->cache_constructor = constructor;
3838         cp->cache_destructor = destructor;
3839         cp->cache_reclaim = reclaim;
3840         cp->cache_private = private;
3841         cp->cache_arena = vmp;
3842         cp->cache_cflags = cflags;
3843 
3844         /*
3845          * Determine the chunk size.
3846          */
3847         chunksize = bufsize;
3848 
3849         if (align >= KMEM_ALIGN) {
3850                 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3851                 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3852         }
3853 
3854         if (cp->cache_flags & KMF_BUFTAG) {
3855                 cp->cache_bufctl = chunksize;
3856                 cp->cache_buftag = chunksize;
3857                 if (cp->cache_flags & KMF_LITE)
3858                         chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3859                 else
3860                         chunksize += sizeof (kmem_buftag_t);
3861         }
3862 
3863         if (cp->cache_flags & KMF_DEADBEEF) {
3864                 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3865                 if (cp->cache_flags & KMF_LITE)
3866                         cp->cache_verify = sizeof (uint64_t);
3867         }
3868 
3869         cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3870 
3871         cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
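        /*
         * For example (illustrative numbers): with bufsize == 40 and
         * align == KMEM_ALIGN, chunksize stays 40 and cache_bufctl == 32,
         * i.e. the freelist linkage of an unhashed cache occupies the last
         * word of the buffer; with KMF_BUFTAG set, the buftag is instead
         * appended at offset 40 and chunksize grows accordingly.
         */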
3872 
3873         /*
3874          * Now that we know the chunk size, determine the optimal slab size.
3875          */
3876         if (vmp == kmem_firewall_arena) {
3877                 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3878                 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3879                 cp->cache_maxcolor = cp->cache_mincolor;
3880                 cp->cache_flags |= KMF_HASH;
3881                 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3882         } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3883             !(cp->cache_flags & KMF_AUDIT) &&
3884             chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3885                 cp->cache_slabsize = vmp->vm_quantum;
3886                 cp->cache_mincolor = 0;
3887                 cp->cache_maxcolor =
3888                     (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3889                 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3890                 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3891         } else {
3892                 size_t chunks, bestfit, waste, slabsize;
3893                 size_t minwaste = LONG_MAX;
3894 
3895                 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3896                         slabsize = P2ROUNDUP(chunksize * chunks,
3897                             vmp->vm_quantum);
3898                         chunks = slabsize / chunksize;
3899                         waste = (slabsize % chunksize) / chunks;
3900                         if (waste < minwaste) {
3901                                 minwaste = waste;
3902                                 bestfit = slabsize;
3903                         }
3904                 }
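                /*
                 * For example (illustrative): with chunksize == 3072 and a
                 * 4K quantum, a one-page slab wastes 1024 bytes per chunk,
                 * but a three-page slab holds four chunks exactly, so the
                 * loop settles on bestfit == 12288.
                 */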
3905                 if (cflags & KMC_QCACHE)
3906                         bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3907                 cp->cache_slabsize = bestfit;
3908                 cp->cache_mincolor = 0;
3909                 cp->cache_maxcolor = bestfit % chunksize;
3910                 cp->cache_flags |= KMF_HASH;
3911         }
3912 
3913         cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3914         cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3915 
3916         /*
3917          * Disallowing prefill when either the DEBUG or HASH flag is set or when
3918          * there is a constructor avoids some tricky issues with debug setup
3919          * that may be revisited later. We cannot allow prefill in a
3920          * metadata cache because of potential recursion.
3921          */
3922         if (vmp == kmem_msb_arena ||
3923             cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3924             cp->cache_constructor != NULL)
3925                 cp->cache_flags &= ~KMF_PREFILL;
3926 
3927         if (cp->cache_flags & KMF_HASH) {
3928                 ASSERT(!(cflags & KMC_NOHASH));
3929                 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3930                     kmem_bufctl_audit_cache : kmem_bufctl_cache;
3931         }
3932 
3933         if (cp->cache_maxcolor >= vmp->vm_quantum)
3934                 cp->cache_maxcolor = vmp->vm_quantum - 1;
3935 
3936         cp->cache_color = cp->cache_mincolor;
3937 
3938         /*
3939          * Initialize the rest of the slab layer.
3940          */
3941         mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3942 
3943         avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3944             sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3945         /* LINTED: E_TRUE_LOGICAL_EXPR */
3946         ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3947         /* reuse partial slab AVL linkage for complete slab list linkage */
3948         list_create(&cp->cache_complete_slabs,
3949             sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3950 
3951         if (cp->cache_flags & KMF_HASH) {
3952                 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3953                     KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3954                 bzero(cp->cache_hash_table,
3955                     KMEM_HASH_INITIAL * sizeof (void *));
3956                 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3957                 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3958         }
3959 
3960         /*
3961          * Initialize the depot.
3962          */
3963         mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3964 
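        /*
         * Select the magazine type.  The scan stops at the first entry whose
         * mt_minbuf is below chunksize; kmem_magtype[] is ordered so that
         * larger buffers end up with smaller magazines.
         */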
3965         for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3966                 continue;
3967 
3968         cp->cache_magtype = mtp;
3969 
3970         /*
3971          * Initialize the CPU layer.
3972          */
3973         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3974                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3975                 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3976                 ccp->cc_flags = cp->cache_flags;
3977                 ccp->cc_rounds = -1;
3978                 ccp->cc_prounds = -1;
3979         }
3980 
3981         /*
3982          * Create the cache's kstats.
3983          */
3984         if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3985             "kmem_cache", KSTAT_TYPE_NAMED,
3986             sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3987             KSTAT_FLAG_VIRTUAL)) != NULL) {
3988                 cp->cache_kstat->ks_data = &kmem_cache_kstat;
3989                 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3990                 cp->cache_kstat->ks_private = cp;
3991                 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3992                 kstat_install(cp->cache_kstat);
3993         }
3994 
3995         /*
3996          * Add the cache to the global list.  This makes it visible
3997          * to kmem_update(), so the cache must be ready for business.
3998          */
3999         mutex_enter(&kmem_cache_lock);
4000         list_insert_tail(&kmem_caches, cp);
4001         mutex_exit(&kmem_cache_lock);
4002 
4003         if (kmem_ready)
4004                 kmem_cache_magazine_enable(cp);
4005 
4006         return (cp);
4007 }
4008 
4009 static int
4010 kmem_move_cmp(const void *buf, const void *p)
4011 {
4012         const kmem_move_t *kmm = p;
4013         uintptr_t v1 = (uintptr_t)buf;
4014         uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
4015         return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
4016 }
4017 
4018 static void
4019 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4020 {
4021         kmd->kmd_reclaim_numer = 1;
4022 }
4023 
4024 /*
4025  * Initially, when choosing candidate slabs for buffers to move, we want to be
4026  * very selective and take only slabs that are less than
4027  * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4028  * slabs, then we raise the allocation ceiling incrementally. The reclaim
4029  * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4030  * longer fragmented.
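 *
 * For example, assuming KMEM_VOID_FRACTION is 8, the initial threshold of
 * 1/8 accepts only slabs that are less than 12.5% allocated; each adjustment
 * raises kmd_reclaim_numer by one, up to a ceiling of 7/8.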
4031  */
4032 static void
4033 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4034 {
4035         if (direction > 0) {
4036                 /* make it easier to find a candidate slab */
4037                 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4038                         kmd->kmd_reclaim_numer++;
4039                 }
4040         } else {
4041                 /* be more selective */
4042                 if (kmd->kmd_reclaim_numer > 1) {
4043                         kmd->kmd_reclaim_numer--;
4044                 }
4045         }
4046 }
4047 
4048 void
4049 kmem_cache_set_move(kmem_cache_t *cp,
4050     kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4051 {
4052         kmem_defrag_t *defrag;
4053 
4054         ASSERT(move != NULL);
4055         /*
4056          * The consolidator does not support NOTOUCH caches because kmem cannot
4057          * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4058          * a low order bit usable by clients to distinguish uninitialized memory
4059          * from known objects (see kmem_slab_create).
4060          */
4061         ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4062         ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4063 
4064         /*
4065          * We should not be holding anyone's cache lock when calling
4066          * kmem_cache_alloc(), so allocate in all cases before acquiring the
4067          * lock.
4068          */
4069         defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4070 
4071         mutex_enter(&cp->cache_lock);
4072 
4073         if (KMEM_IS_MOVABLE(cp)) {
4074                 if (cp->cache_move == NULL) {
4075                         ASSERT(cp->cache_slab_alloc == 0);
4076 
4077                         cp->cache_defrag = defrag;
4078                         defrag = NULL; /* nothing to free */
4079                         bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4080                         avl_create(&cp->cache_defrag->kmd_moves_pending,
4081                             kmem_move_cmp, sizeof (kmem_move_t),
4082                             offsetof(kmem_move_t, kmm_entry));
4083                         /* LINTED: E_TRUE_LOGICAL_EXPR */
4084                         ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4085                         /* reuse the slab's AVL linkage for deadlist linkage */
4086                         list_create(&cp->cache_defrag->kmd_deadlist,
4087                             sizeof (kmem_slab_t),
4088                             offsetof(kmem_slab_t, slab_link));
4089                         kmem_reset_reclaim_threshold(cp->cache_defrag);
4090                 }
4091                 cp->cache_move = move;
4092         }
4093 
4094         mutex_exit(&cp->cache_lock);
4095 
4096         if (defrag != NULL) {
4097                 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4098         }
4099 }
4100 
4101 void
4102 kmem_cache_destroy(kmem_cache_t *cp)
4103 {
4104         int cpu_seqid;
4105 
4106         /*
4107          * Remove the cache from the global cache list so that no one else
4108          * can schedule tasks on its behalf, wait for any pending tasks to
4109          * complete, purge the cache, and then destroy it.
4110          */
4111         mutex_enter(&kmem_cache_lock);
4112         list_remove(&kmem_caches, cp);
4113         mutex_exit(&kmem_cache_lock);
4114 
4115         if (kmem_taskq != NULL)
4116                 taskq_wait(kmem_taskq);
4117 
4118         if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4119                 taskq_wait(kmem_move_taskq);
4120 
4121         kmem_cache_magazine_purge(cp);
4122 
4123         mutex_enter(&cp->cache_lock);
4124         if (cp->cache_buftotal != 0)
4125                 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4126                     cp->cache_name, (void *)cp);
4127         if (cp->cache_defrag != NULL) {
4128                 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4129                 list_destroy(&cp->cache_defrag->kmd_deadlist);
4130                 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4131                 cp->cache_defrag = NULL;
4132         }
4133         /*
4134          * The cache is now dead.  There should be no further activity.  We
4135          * enforce this by setting land mines in the constructor, destructor,
4136          * reclaim, and move routines that induce a kernel text fault if
4137          * invoked.
4138          */
4139         cp->cache_constructor = (int (*)(void *, void *, int))1;
4140         cp->cache_destructor = (void (*)(void *, void *))2;
4141         cp->cache_reclaim = (void (*)(void *))3;
4142         cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4143         mutex_exit(&cp->cache_lock);
4144 
4145         kstat_delete(cp->cache_kstat);
4146 
4147         if (cp->cache_hash_table != NULL)
4148                 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4149                     (cp->cache_hash_mask + 1) * sizeof (void *));
4150 
4151         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4152                 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4153 
4154         mutex_destroy(&cp->cache_depot_lock);
4155         mutex_destroy(&cp->cache_lock);
4156 
4157         vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4158 }
4159 
4160 /*ARGSUSED*/
4161 static int
4162 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4163 {
4164         ASSERT(MUTEX_HELD(&cpu_lock));
4165         if (what == CPU_UNCONFIG) {
4166                 kmem_cache_applyall(kmem_cache_magazine_purge,
4167                     kmem_taskq, TQ_SLEEP);
4168                 kmem_cache_applyall(kmem_cache_magazine_enable,
4169                     kmem_taskq, TQ_SLEEP);
4170         }
4171         return (0);
4172 }
4173 
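/*
 * Populate alloc_table so that each table_unit-sized range of request sizes
 * maps to the smallest cache that can satisfy it: a request of size n is
 * served by alloc_table[(n - 1) >> shift].  For example (illustrative), with
 * shift == 3 a 20-byte request indexes entry (19 >> 3) == 2, which the loop
 * below fills with the first cache whose size is at least 24.
 */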
4174 static void
4175 kmem_alloc_caches_create(const int *array, size_t count,
4176     kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4177 {
4178         char name[KMEM_CACHE_NAMELEN + 1];
4179         size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4180         size_t size = table_unit;
4181         int i;
4182 
4183         for (i = 0; i < count; i++) {
4184                 size_t cache_size = array[i];
4185                 size_t align = KMEM_ALIGN;
4186                 kmem_cache_t *cp;
4187 
4188                 /* if the table has an entry for maxbuf, we're done */
4189                 if (size > maxbuf)
4190                         break;
4191 
4192                 /* cache size must be a multiple of the table unit */
4193                 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4194 
4195                 /*
4196                  * If they allocate a multiple of the coherency granularity,
4197                  * they get a coherency-granularity-aligned address.
4198                  */
4199                 if (IS_P2ALIGNED(cache_size, 64))
4200                         align = 64;
4201                 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4202                         align = PAGESIZE;
4203                 (void) snprintf(name, sizeof (name),
4204                     "kmem_alloc_%lu", cache_size);
4205                 cp = kmem_cache_create(name, cache_size, align,
4206                     NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4207 
4208                 while (size <= cache_size) {
4209                         alloc_table[(size - 1) >> shift] = cp;
4210                         size += table_unit;
4211                 }
4212         }
4213 
4214         ASSERT(size > maxbuf);               /* i.e. maxbuf <= max(cache_size) */
4215 }
4216 
4217 static void
4218 kmem_cache_init(int pass, int use_large_pages)
4219 {
4220         int i;
4221         size_t maxbuf;
4222         kmem_magtype_t *mtp;
4223 
4224         for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4225                 char name[KMEM_CACHE_NAMELEN + 1];
4226 
4227                 mtp = &kmem_magtype[i];
4228                 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4229                 mtp->mt_cache = kmem_cache_create(name,
4230                     (mtp->mt_magsize + 1) * sizeof (void *),
4231                     mtp->mt_align, NULL, NULL, NULL, NULL,
4232                     kmem_msb_arena, KMC_NOHASH);
4233         }
4234 
4235         kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4236             sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4237             kmem_msb_arena, KMC_NOHASH);
4238 
4239         kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4240             sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4241             kmem_msb_arena, KMC_NOHASH);
4242 
4243         kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4244             sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4245             kmem_msb_arena, KMC_NOHASH);
4246 
4247         if (pass == 2) {
4248                 kmem_va_arena = vmem_create("kmem_va",
4249                     NULL, 0, PAGESIZE,
4250                     vmem_alloc, vmem_free, heap_arena,
4251                     8 * PAGESIZE, VM_SLEEP);
4252 
4253                 if (use_large_pages) {
4254                         kmem_default_arena = vmem_xcreate("kmem_default",
4255                             NULL, 0, PAGESIZE,
4256                             segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4257                             0, VMC_DUMPSAFE | VM_SLEEP);
4258                 } else {
4259                         kmem_default_arena = vmem_create("kmem_default",
4260                             NULL, 0, PAGESIZE,
4261                             segkmem_alloc, segkmem_free, kmem_va_arena,
4262                             0, VMC_DUMPSAFE | VM_SLEEP);
4263                 }
4264 
4265                 /* Figure out what our maximum cache size is */
4266                 maxbuf = kmem_max_cached;
4267                 if (maxbuf <= KMEM_MAXBUF) {
4268                         maxbuf = 0;
4269                         kmem_max_cached = KMEM_MAXBUF;
4270                 } else {
4271                         size_t size = 0;
4272                         size_t max =
4273                             sizeof (kmem_big_alloc_sizes) / sizeof (int);
4274                         /*
4275                          * Round maxbuf up to an existing cache size.  If maxbuf
4276                          * is larger than the largest cache, we truncate it to
4277                          * the largest cache's size.
4278                          */
4279                         for (i = 0; i < max; i++) {
4280                                 size = kmem_big_alloc_sizes[i];
4281                                 if (maxbuf <= size)
4282                                         break;
4283                         }
4284                         kmem_max_cached = maxbuf = size;
4285                 }
4286 
4287                 /*
4288                  * The big alloc table may not be completely overwritten, so
4289                  * we clear out any stale cache pointers from the first pass.
4290                  */
4291                 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4292         } else {
4293                 /*
4294                  * During the first pass, the kmem_alloc_* caches
4295                  * are treated as metadata.
4296                  */
4297                 kmem_default_arena = kmem_msb_arena;
4298                 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4299         }
4300 
4301         /*
4302          * Set up the default caches to back kmem_alloc()
4303          */
4304         kmem_alloc_caches_create(
4305             kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4306             kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4307 
4308         kmem_alloc_caches_create(
4309             kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4310             kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4311 
4312         kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4313 }
4314 
4315 void
4316 kmem_init(void)
4317 {
4318         kmem_cache_t *cp;
4319         int old_kmem_flags = kmem_flags;
4320         int use_large_pages = 0;
4321         size_t maxverify, minfirewall;
4322 
4323         kstat_init();
4324 
4325         /*
4326          * Don't do firewalled allocations if the heap is less than 1TB
4327          * (i.e. on a 32-bit kernel).
4328          * The resulting VM_NEXTFIT allocations would create too much
4329          * fragmentation in a small heap.
4330          */
4331 #if defined(_LP64)
4332         maxverify = minfirewall = PAGESIZE / 2;
4333 #else
4334         maxverify = minfirewall = ULONG_MAX;
4335 #endif
4336 
4337         /* LINTED */
4338         ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4339 
4340         list_create(&kmem_caches, sizeof (kmem_cache_t),
4341             offsetof(kmem_cache_t, cache_link));
4342 
4343         kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4344             vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4345             VM_SLEEP | VMC_NO_QCACHE);
4346 
4347         kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4348             PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4349             VMC_DUMPSAFE | VM_SLEEP);
4350 
4351         kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4352             segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4353 
4354         kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4355             segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4356 
4357         kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4358             segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4359 
4360         kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4361             NULL, 0, PAGESIZE,
4362             kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4363             0, VM_SLEEP);
4364 
4365         kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4366             segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4367             VMC_DUMPSAFE | VM_SLEEP);
4368 
4369         /* temporary oversize arena for mod_read_system_file */
4370         kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4371             segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4372 
4373         kmem_reap_interval = 15 * hz;
4374 
4375         /*
4376          * Read /etc/system.  This is a chicken-and-egg problem because
4377          * kmem_flags may be set in /etc/system, but mod_read_system_file()
4378          * needs to use the allocator.  The simplest solution is to create
4379          * all the standard kmem caches, read /etc/system, destroy all the
4380          * caches we just created, and then create them all again in light
4381          * of the (possibly) new kmem_flags and other kmem tunables.
4382          */
4383         kmem_cache_init(1, 0);
4384 
4385         mod_read_system_file(boothowto & RB_ASKNAME);
4386 
4387         while ((cp = list_tail(&kmem_caches)) != NULL)
4388                 kmem_cache_destroy(cp);
4389 
4390         vmem_destroy(kmem_oversize_arena);
4391 
4392         if (old_kmem_flags & KMF_STICKY)
4393                 kmem_flags = old_kmem_flags;
4394 
4395         if (!(kmem_flags & KMF_AUDIT))
4396                 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4397 
4398         if (kmem_maxverify == 0)
4399                 kmem_maxverify = maxverify;
4400 
4401         if (kmem_minfirewall == 0)
4402                 kmem_minfirewall = minfirewall;
4403 
4404         /*
4405          * Give segkmem a chance to figure out if we are using large pages
4406          * for the kernel heap.
4407          */
4408         use_large_pages = segkmem_lpsetup();
4409 
4410         /*
4411          * To protect against corruption, we keep the actual number of callers
4412          * that KMF_LITE records separate from the tunable.  We arbitrarily clamp
4413          * to 16, since the overhead for small buffers quickly gets out of
4414          * hand.
4415          *
4416          * The real limit would depend on the needs of the largest KMC_NOHASH
4417          * cache.
4418          */
4419         kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4420         kmem_lite_pcs = kmem_lite_count;
4421 
4422         /*
4423          * Normally, we firewall oversized allocations when possible, but
4424          * if we are using large pages for kernel memory, and we don't have
4425          * any non-LITE debugging flags set, we want to allocate oversized
4426          * buffers from large pages, and so skip the firewalling.
4427          */
4428         if (use_large_pages &&
4429             ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4430                 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4431                     PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4432                     0, VMC_DUMPSAFE | VM_SLEEP);
4433         } else {
4434                 kmem_oversize_arena = vmem_create("kmem_oversize",
4435                     NULL, 0, PAGESIZE,
4436             segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX ?
4437                     kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4438                     VM_SLEEP);
4439         }
4440 
4441         kmem_cache_init(2, use_large_pages);
4442 
4443         if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4444                 if (kmem_transaction_log_size == 0)
4445                         kmem_transaction_log_size = kmem_maxavail() / 50;
4446                 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4447         }
4448 
4449         if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4450                 if (kmem_content_log_size == 0)
4451                         kmem_content_log_size = kmem_maxavail() / 50;
4452                 kmem_content_log = kmem_log_init(kmem_content_log_size);
4453         }
4454 
4455         kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4456 
4457         kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4458 
4459         /*
4460          * Initialize STREAMS message caches so allocb() is available.
4461          * This allows us to initialize the logging framework (cmn_err(9F),
4462          * strlog(9F), etc) so we can start recording messages.
4463          */
4464         streams_msg_init();
4465 
4466         /*
4467          * Initialize the ZSD framework in Zones so modules loaded henceforth
4468          * can register their callbacks.
4469          */
4470         zone_zsd_init();
4471 
4472         log_init();
4473         taskq_init();
4474 
4475         /*
4476          * Warn about invalid or dangerous values of kmem_flags.
4477          * Always warn about unsupported values.
4478          */
4479         if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4480             KMF_CONTENTS | KMF_LITE)) != 0) ||
4481             ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4482                 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4483                     "See the Solaris Tunable Parameters Reference Manual.",
4484                     kmem_flags);
4485 
4486 #ifdef DEBUG
4487         if ((kmem_flags & KMF_DEBUG) == 0)
4488                 cmn_err(CE_NOTE, "kmem debugging disabled.");
4489 #else
4490         /*
4491          * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4492          * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4493          * if KMF_AUDIT is set). We should warn the user about the performance
4494          * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4495          * isn't set (since that disables AUDIT).
4496          */
4497         if (!(kmem_flags & KMF_LITE) &&
4498             (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4499                 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4500                     "enabled (kmem_flags = 0x%x).  Performance degradation "
4501                     "and large memory overhead possible. See the Solaris "
4502                     "Tunable Parameters Reference Manual.", kmem_flags);
4503 #endif /* not DEBUG */
4504 
4505         kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4506 
4507         kmem_ready = 1;
4508 
4509         /*
4510          * Initialize the platform-specific aligned/DMA memory allocator.
4511          */
4512         ka_init();
4513 
4514         /*
4515          * Initialize 32-bit ID cache.
4516          */
4517         id32_init();
4518 
4519         /*
4520          * Initialize the networking stack so modules loaded can
4521          * register their callbacks.
4522          */
4523         netstack_init();
4524 }
4525 
4526 static void
4527 kmem_move_init(void)
4528 {
4529         kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4530             sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4531             kmem_msb_arena, KMC_NOHASH);
4532         kmem_move_cache = kmem_cache_create("kmem_move_cache",
4533             sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4534             kmem_msb_arena, KMC_NOHASH);
4535 
4536         /*
4537          * kmem guarantees that move callbacks are sequential and that even
4538          * across multiple caches no two moves ever execute simultaneously.
4539          * Move callbacks are processed on a separate taskq so that client code
4540          * does not interfere with internal maintenance tasks.
4541          */
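        /* A single taskq thread (nthreads == 1) enforces that guarantee. */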
4542         kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4543             minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4544 }
4545 
4546 void
4547 kmem_thread_init(void)
4548 {
4549         kmem_move_init();
4550         kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4551             300, INT_MAX, TASKQ_PREPOPULATE);
4552 }
4553 
4554 void
4555 kmem_mp_init(void)
4556 {
4557         mutex_enter(&cpu_lock);
4558         register_cpu_setup_func(kmem_cpu_setup, NULL);
4559         mutex_exit(&cpu_lock);
4560 
4561         kmem_update_timeout(NULL);
4562 
4563         taskq_mp_init();
4564 }
4565 
4566 /*
4567  * Return the slab of the allocated buffer, or NULL if the buffer is not
4568  * allocated. This function may be called with a known slab address to determine
4569  * whether or not the buffer is allocated, or with a NULL slab address to obtain
4570  * an allocated buffer's slab.
4571  */
4572 static kmem_slab_t *
4573 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4574 {
4575         kmem_bufctl_t *bcp, *bufbcp;
4576 
4577         ASSERT(MUTEX_HELD(&cp->cache_lock));
4578         ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4579 
4580         if (cp->cache_flags & KMF_HASH) {
4581                 for (bcp = *KMEM_HASH(cp, buf);
4582                     (bcp != NULL) && (bcp->bc_addr != buf);
4583                     bcp = bcp->bc_next) {
4584                         continue;
4585                 }
4586                 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4587                 return (bcp == NULL ? NULL : bcp->bc_slab);
4588         }
4589 
4590         if (sp == NULL) {
4591                 sp = KMEM_SLAB(cp, buf);
4592         }
4593         bufbcp = KMEM_BUFCTL(cp, buf);
4594         for (bcp = sp->slab_head;
4595             (bcp != NULL) && (bcp != bufbcp);
4596             bcp = bcp->bc_next) {
4597                 continue;
4598         }
4599         return (bcp == NULL ? sp : NULL);
4600 }
4601 
4602 static boolean_t
4603 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4604 {
4605         long refcnt = sp->slab_refcnt;
4606 
4607         ASSERT(cp->cache_defrag != NULL);
4608 
4609         /*
4610          * For code coverage we want to be able to move an object within the
4611          * same slab (the only partial slab) even if allocating the destination
4612          * buffer resulted in a completely allocated slab.
4613          */
4614         if (flags & KMM_DEBUG) {
4615                 return ((flags & KMM_DESPERATE) ||
4616                     ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4617         }
4618 
4619         /* If we're desperate, we don't care if the client said NO. */
4620         if (flags & KMM_DESPERATE) {
4621                 return (refcnt < sp->slab_chunks); /* any partial */
4622         }
4623 
4624         if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4625                 return (B_FALSE);
4626         }
4627 
4628         if ((refcnt == 1) || kmem_move_any_partial) {
4629                 return (refcnt < sp->slab_chunks);
4630         }
4631 
4632         /*
4633          * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4634          * slabs with a progressively higher percentage of used buffers can be
4635          * reclaimed until the cache as a whole is no longer fragmented.
4636          *
4637          *      sp->slab_refcnt   kmd_reclaim_numer
4638          *      --------------- < ------------------
4639          *      sp->slab_chunks   KMEM_VOID_FRACTION
4640          */
4641         return ((refcnt * KMEM_VOID_FRACTION) <
4642             (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4643 }
4644 
4645 /*
4646  * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4647  * or when the buffer is freed.
4648  */
4649 static void
4650 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4651 {
4652         ASSERT(MUTEX_HELD(&cp->cache_lock));
4653         ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4654 
4655         if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4656                 return;
4657         }
4658 
4659         if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4660                 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4661                         avl_remove(&cp->cache_partial_slabs, sp);
4662                         sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4663                         sp->slab_stuck_offset = (uint32_t)-1;
4664                         avl_add(&cp->cache_partial_slabs, sp);
4665                 }
4666         } else {
4667                 sp->slab_later_count = 0;
4668                 sp->slab_stuck_offset = (uint32_t)-1;
4669         }
4670 }
4671 
4672 static void
4673 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4674 {
4675         ASSERT(taskq_member(kmem_move_taskq, curthread));
4676         ASSERT(MUTEX_HELD(&cp->cache_lock));
4677         ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4678 
4679         if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4680                 return;
4681         }
4682 
4683         avl_remove(&cp->cache_partial_slabs, sp);
4684         sp->slab_later_count = 0;
4685         sp->slab_flags |= KMEM_SLAB_NOMOVE;
4686         sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4687         avl_add(&cp->cache_partial_slabs, sp);
4688 }
4689 
4690 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4691 
4692 /*
4693  * The move callback takes two buffer addresses, the buffer to be moved, and a
4694  * newly allocated and constructed buffer selected by kmem as the destination.
4695  * It also takes the size of the buffer and an optional user argument specified
4696  * at cache creation time. kmem guarantees that the buffer to be moved has not
4697  * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4698  * guarantee the present whereabouts of the buffer to be moved, so it is up to
4699  * the client to safely determine whether or not it is still using the buffer.
4700  * The client must not free either of the buffers passed to the move callback,
4701  * since kmem wants to free them directly to the slab layer. The client response
4702  * tells kmem which of the two buffers to free:
4703  *
4704  * YES          kmem frees the old buffer (the move was successful)
4705  * NO           kmem frees the new buffer, marks the slab of the old buffer
4706  *              non-reclaimable to avoid bothering the client again
4707  * LATER        kmem frees the new buffer, increments slab_later_count
4708  * DONT_KNOW    kmem frees the new buffer
4709  * DONT_NEED    kmem frees both the old buffer and the new buffer
4710  *
4711  * The pending callback argument now being processed contains both of the
4712  * buffers (old and new) passed to the move callback function, the slab of the
4713  * old buffer, and flags related to the move request, such as whether or not the
4714  * system was desperate for memory.
4715  *
4716  * Slabs are not freed while there is a pending callback, but instead are kept
4717  * on a deadlist, which is drained after the last callback completes. This means
4718  * that slabs are safe to access until kmem_move_end(), no matter how many of
4719  * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4720  * zero for as long as the slab remains on the deadlist and until the slab is
4721  * freed.
4722  */
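/*
 * For illustration only, a sketch of a (hypothetical) client move callback;
 * foo_t and its helper routines are assumptions, not part of this file:
 *
 *	static kmem_cbrc_t
 *	foo_move(void *old, void *new, size_t size, void *arg)
 *	{
 *		foo_t *fp = old;
 *
 *		if (!foo_is_valid(fp))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		if (!mutex_tryenter(&fp->foo_lock))
 *			return (KMEM_CBRC_LATER);
 *		bcopy(old, new, size);
 *		foo_update_references(fp, new);
 *		mutex_exit(&fp->foo_lock);
 *		return (KMEM_CBRC_YES);
 *	}
 */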
4723 static void
4724 kmem_move_buffer(kmem_move_t *callback)
4725 {
4726         kmem_cbrc_t response;
4727         kmem_slab_t *sp = callback->kmm_from_slab;
4728         kmem_cache_t *cp = sp->slab_cache;
4729         boolean_t free_on_slab;
4730 
4731         ASSERT(taskq_member(kmem_move_taskq, curthread));
4732         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4733         ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4734 
4735         /*
4736          * The number of allocated buffers on the slab may have changed since we
4737          * last checked the slab's reclaimability (when the pending move was
4738          * enqueued), or the client may have responded NO when asked to move
4739          * another buffer on the same slab.
4740          */
4741         if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4742                 kmem_slab_free(cp, callback->kmm_to_buf);
4743                 kmem_move_end(cp, callback);
4744                 return;
4745         }
4746 
4747         /*
4748          * Checking the slab layer is easy, so we might as well do that here
4749          * in case we can avoid bothering the client.
4750          */
4751         mutex_enter(&cp->cache_lock);
4752         free_on_slab = (kmem_slab_allocated(cp, sp,
4753             callback->kmm_from_buf) == NULL);
4754         mutex_exit(&cp->cache_lock);
4755 
4756         if (free_on_slab) {
4757                 kmem_slab_free(cp, callback->kmm_to_buf);
4758                 kmem_move_end(cp, callback);
4759                 return;
4760         }
4761 
4762         if (cp->cache_flags & KMF_BUFTAG) {
4763                 /*
4764                  * Make kmem_cache_alloc_debug() apply the constructor for us.
4765                  */
4766                 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4767                     KM_NOSLEEP, 1, caller()) != 0) {
4768                         kmem_move_end(cp, callback);
4769                         return;
4770                 }
4771         } else if (cp->cache_constructor != NULL &&
4772             cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4773             KM_NOSLEEP) != 0) {
4774                 atomic_inc_64(&cp->cache_alloc_fail);
4775                 kmem_slab_free(cp, callback->kmm_to_buf);
4776                 kmem_move_end(cp, callback);
4777                 return;
4778         }
4779 
4780         cp->cache_defrag->kmd_callbacks++;
4781         cp->cache_defrag->kmd_thread = curthread;
4782         cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4783         cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4784         DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4785             callback);
4786 
4787         response = cp->cache_move(callback->kmm_from_buf,
4788             callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4789 
4790         DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4791             callback, kmem_cbrc_t, response);
4792         cp->cache_defrag->kmd_thread = NULL;
4793         cp->cache_defrag->kmd_from_buf = NULL;
4794         cp->cache_defrag->kmd_to_buf = NULL;
4795 
4796         if (response == KMEM_CBRC_YES) {
4797                 cp->cache_defrag->kmd_yes++;
4798                 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4799                 /* slab safe to access until kmem_move_end() */
4800                 if (sp->slab_refcnt == 0)
4801                         cp->cache_defrag->kmd_slabs_freed++;
4802                 mutex_enter(&cp->cache_lock);
4803                 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4804                 mutex_exit(&cp->cache_lock);
4805                 kmem_move_end(cp, callback);
4806                 return;
4807         }
4808 
4809         switch (response) {
4810         case KMEM_CBRC_NO:
4811                 cp->cache_defrag->kmd_no++;
4812                 mutex_enter(&cp->cache_lock);
4813                 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4814                 mutex_exit(&cp->cache_lock);
4815                 break;
4816         case KMEM_CBRC_LATER:
4817                 cp->cache_defrag->kmd_later++;
4818                 mutex_enter(&cp->cache_lock);
4819                 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4820                         mutex_exit(&cp->cache_lock);
4821                         break;
4822                 }
4823 
4824                 if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4825                         kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4826                 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4827                         sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4828                             callback->kmm_from_buf);
4829                 }
4830                 mutex_exit(&cp->cache_lock);
4831                 break;
4832         case KMEM_CBRC_DONT_NEED:
4833                 cp->cache_defrag->kmd_dont_need++;
4834                 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4835                 if (sp->slab_refcnt == 0)
4836                         cp->cache_defrag->kmd_slabs_freed++;
4837                 mutex_enter(&cp->cache_lock);
4838                 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4839                 mutex_exit(&cp->cache_lock);
4840                 break;
4841         case KMEM_CBRC_DONT_KNOW:
4842                 /*
4843                  * If we don't know if we can move this buffer or not, we'll
4844                  * just assume that we can't:  if the buffer is in fact free,
4845                  * then it is sitting in one of the per-CPU magazines or in
4846                  * a full magazine in the depot layer.  Either way, because
4847                  * defrag is induced in the same logic that reaps a cache,
4848                  * it's likely that full magazines will be returned to the
4849                  * system soon (thereby accomplishing what we're trying to
4850                  * accomplish here: return those magazines to their slabs).
4851                  * Given this, any work that we might do now to locate a buffer
4852                  * in a magazine is wasted (and expensive!) work; we bump
4853                  * a counter in this case and otherwise assume that we can't
4854                  * move it.
4855                  */
4856                 cp->cache_defrag->kmd_dont_know++;
4857                 break;
4858         default:
4859                 panic("'%s' (%p) unexpected move callback response %d\n",
4860                     cp->cache_name, (void *)cp, response);
4861         }
4862 
4863         kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4864         kmem_move_end(cp, callback);
4865 }
4866 
4867 /* Return B_FALSE if there is insufficient memory for the move request. */
4868 static boolean_t
4869 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4870 {
4871         void *to_buf;
4872         avl_index_t index;
4873         kmem_move_t *callback, *pending;
4874         ulong_t n;
4875 
4876         ASSERT(taskq_member(kmem_taskq, curthread));
4877         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4878         ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4879 
4880         callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4881 
4882         if (callback == NULL)
4883                 return (B_FALSE);
4884 
4885         callback->kmm_from_slab = sp;
4886         callback->kmm_from_buf = buf;
4887         callback->kmm_flags = flags;
4888 
4889         mutex_enter(&cp->cache_lock);
4890 
4891         n = avl_numnodes(&cp->cache_partial_slabs);
4892         if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
4893                 mutex_exit(&cp->cache_lock);
4894                 kmem_cache_free(kmem_move_cache, callback);
4895                 return (B_TRUE); /* there is no need for the move request */
4896         }
4897 
4898         pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4899         if (pending != NULL) {
4900                 /*
4901                  * If the move is already pending and we're desperate now,
4902                  * update the move flags.
4903                  */
4904                 if (flags & KMM_DESPERATE) {
4905                         pending->kmm_flags |= KMM_DESPERATE;
4906                 }
4907                 mutex_exit(&cp->cache_lock);
4908                 kmem_cache_free(kmem_move_cache, callback);
4909                 return (B_TRUE);
4910         }
4911 
4912         to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4913             B_FALSE);
4914         callback->kmm_to_buf = to_buf;
4915         avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4916 
4917         mutex_exit(&cp->cache_lock);
4918 
4919         if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4920             callback, TQ_NOSLEEP)) {
4921                 mutex_enter(&cp->cache_lock);
4922                 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4923                 mutex_exit(&cp->cache_lock);
4924                 kmem_slab_free(cp, to_buf);
4925                 kmem_cache_free(kmem_move_cache, callback);
4926                 return (B_FALSE);
4927         }
4928 
4929         return (B_TRUE);
4930 }
4931 
4932 static void
4933 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4934 {
4935         avl_index_t index;
4936 
4937         ASSERT(cp->cache_defrag != NULL);
4938         ASSERT(taskq_member(kmem_move_taskq, curthread));
4939         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4940 
4941         mutex_enter(&cp->cache_lock);
4942         VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4943             callback->kmm_from_buf, &index) != NULL);
4944         avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4945         if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4946                 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4947                 kmem_slab_t *sp;
4948 
4949                 /*
4950                  * The last pending move completed. Release all slabs from the
4951                  * front of the dead list except for any slab at the tail that
4952                  * needs to be released from the context of kmem_move_buffers().
4953                  * kmem deferred unmapping the buffers on these slabs in order
4954                  * to guarantee that buffers passed to the move callback have
4955                  * been touched only by kmem or by the client itself.
4956                  */
4957                 while ((sp = list_remove_head(deadlist)) != NULL) {
4958                         if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
4959                                 list_insert_tail(deadlist, sp);
4960                                 break;
4961                         }
4962                         cp->cache_defrag->kmd_deadcount--;
4963                         cp->cache_slab_destroy++;
4964                         mutex_exit(&cp->cache_lock);
4965                         kmem_slab_destroy(cp, sp);
4966                         mutex_enter(&cp->cache_lock);
4967                 }
4968         }
4969         mutex_exit(&cp->cache_lock);
4970         kmem_cache_free(kmem_move_cache, callback);
4971 }
4972 
4973 /*
4974  * Move buffers from least used slabs first by scanning backwards from the end
4975  * of the partial slab list. Scan at most max_scan candidate slabs and move
4976  * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
4977  * If desperate to reclaim memory, move buffers from any partial slab, otherwise
4978  * skip slabs with a ratio of allocated buffers at or above the current
4979  * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
4980  * scan is aborted) so that the caller can adjust the reclaimability threshold
4981  * depending on how many reclaimable slabs it finds.
4982  *
4983  * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
4984  * move request, since it is not valid for kmem_move_begin() to call
4985  * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
 */
static int
kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
    int flags)
{
        kmem_slab_t *sp;
        void *buf;
        int i, j; /* slab index, buffer index */
        int s; /* reclaimable slabs */
        int b; /* allocated (movable) buffers on reclaimable slab */
        boolean_t success;
        int refcnt;
        int nomove;

        ASSERT(taskq_member(kmem_taskq, curthread));
        ASSERT(MUTEX_HELD(&cp->cache_lock));
        ASSERT(kmem_move_cache != NULL);
        ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
        ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
            avl_numnodes(&cp->cache_partial_slabs) > 1);

        if (kmem_move_blocked) {
                return (0);
        }

        if (kmem_move_fulltilt) {
                flags |= KMM_DESPERATE;
        }

        if (max_scan == 0 || (flags & KMM_DESPERATE)) {
                /*
                 * Scan as many slabs as needed to find the desired number of
                 * candidate slabs.
                 */
                max_scan = (size_t)-1;
        }

        if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
                /* Find as many candidate slabs as possible. */
                max_slabs = (size_t)-1;
        }

        sp = avl_last(&cp->cache_partial_slabs);
        ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
        for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
            ((sp != avl_first(&cp->cache_partial_slabs)) ||
            (flags & KMM_DEBUG));
            sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {

                if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
                        continue;
                }
                s++;

                /* Look for allocated buffers to move. */
                for (j = 0, b = 0, buf = sp->slab_base;
                    (j < sp->slab_chunks) && (b < sp->slab_refcnt);
                    buf = (((char *)buf) + cp->cache_chunksize), j++) {

                        if (kmem_slab_allocated(cp, sp, buf) == NULL) {
                                continue;
                        }

                        b++;

                        /*
                         * Prevent the slab from being destroyed while we drop
                         * cache_lock and while the pending move is not yet
                         * registered. Flag the pending move while
                         * kmd_moves_pending may still be empty, since we can't
                         * yet rely on a non-zero pending move count to prevent
                         * the slab from being destroyed.
                         */
                        ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
                        sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
                        /*
                         * Save refcnt and nomove so that we can recheck them
                         * after reacquiring the lock, since these control the
                         * order of partial slabs, and we want to know whether
                         * we can pick up the scan where we left off.
                         */
                        refcnt = sp->slab_refcnt;
                        nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
                        mutex_exit(&cp->cache_lock);

                        success = kmem_move_begin(cp, sp, buf, flags);

                        /*
                         * Now, before the lock is reacquired, kmem could
                         * process all pending move requests and purge the
                         * deadlist, so that upon reacquiring the lock, sp has
                         * been remapped. Or, the client may free all the
                         * objects on the slab while the pending moves are still
                         * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
                         * flag causes the slab to be put at the end of the
                         * deadlist and prevents it from being destroyed, since
                         * we plan to destroy it here after reacquiring the
                         * lock.
                         */
                        mutex_enter(&cp->cache_lock);
                        ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
                        sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;

                        if (sp->slab_refcnt == 0) {
                                list_t *deadlist =
                                    &cp->cache_defrag->kmd_deadlist;
                                list_remove(deadlist, sp);

                                if (!avl_is_empty(
                                    &cp->cache_defrag->kmd_moves_pending)) {
                                        /*
                                         * A pending move makes it unsafe to
                                         * destroy the slab, because even though
                                         * the move is no longer needed, the
                                         * context where that is determined
                                         * requires the slab to exist.
                                         * Fortunately, a pending move also
                                         * means we don't need to destroy the
                                         * slab here, since it will get
                                         * destroyed along with any other slabs
                                         * on the deadlist after the last
                                         * pending move completes.
                                         */
                                        list_insert_head(deadlist, sp);
                                        return (-1);
                                }

                                /*
                                 * Destroy the slab now if it was completely
                                 * freed while we dropped cache_lock and there
                                 * are no pending moves. Since slab_refcnt
                                 * cannot change once it reaches zero, no new
                                 * pending moves from that slab are possible.
                                 */
                                cp->cache_defrag->kmd_deadcount--;
                                cp->cache_slab_destroy++;
                                mutex_exit(&cp->cache_lock);
                                kmem_slab_destroy(cp, sp);
                                mutex_enter(&cp->cache_lock);
                                /*
                                 * Since we can't pick up the scan where we left
                                 * off, abort the scan and say nothing about the
                                 * number of reclaimable slabs.
                                 */
                                return (-1);
                        }

                        if (!success) {
                                /*
                                 * Abort the scan if there is not enough memory
                                 * for the request and say nothing about the
                                 * number of reclaimable slabs.
                                 */
                                return (-1);
                        }

                        /*
                         * The slab's position changed while the lock was
                         * dropped, so we don't know where we are in the
                         * sequence any more.
                         */
                        if (sp->slab_refcnt != refcnt) {
                                /*
                                 * If this is a KMM_DEBUG move, the slab_refcnt
                                 * may have changed because we allocated a
                                 * destination buffer on the same slab. In that
                                 * case, we're not interested in counting it.
                                 */
                                return (-1);
                        }
                        if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
                                return (-1);

                        /*
                         * Generating a move request allocates a destination
                         * buffer from the slab layer, removing the first
                         * partial slab from the partial-slab list if the
                         * allocation leaves it completely allocated. If the
                         * current slab becomes the first partial slab as a
                         * result, we can't continue to scan backwards.
                         *
                         * If this is a KMM_DEBUG move and we allocated the
                         * destination buffer from the last partial slab, then
                         * the buffer we're moving is on the same slab and our
                         * slab_refcnt has changed, causing us to return before
                         * reaching here if there are no partial slabs left.
                         */
                        ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
                        if (sp == avl_first(&cp->cache_partial_slabs)) {
                                /*
                                 * We're not interested in a second KMM_DEBUG
                                 * move.
                                 */
                                goto end_scan;
                        }
                }
        }
end_scan:

        return (s);
}

typedef struct kmem_move_notify_args {
        kmem_cache_t *kmna_cache;
        void *kmna_buf;
} kmem_move_notify_args_t;

static void
kmem_cache_move_notify_task(void *arg)
{
        kmem_move_notify_args_t *args = arg;
        kmem_cache_t *cp = args->kmna_cache;
        void *buf = args->kmna_buf;
        kmem_slab_t *sp;

        ASSERT(taskq_member(kmem_taskq, curthread));
        ASSERT(list_link_active(&cp->cache_link));

        kmem_free(args, sizeof (kmem_move_notify_args_t));
        mutex_enter(&cp->cache_lock);
        sp = kmem_slab_allocated(cp, NULL, buf);

        /* Ignore the notification if the buffer is no longer allocated. */
        if (sp == NULL) {
                mutex_exit(&cp->cache_lock);
                return;
        }

        /* Ignore the notification if there's no reason to move the buffer. */
        if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
                /*
                 * The notification is relevant, but only act on it if the
                 * slab was marked by an earlier refusal to move one of its
                 * buffers (KMEM_SLAB_NOMOVE or a non-zero slab_later_count);
                 * otherwise ignore it.
                 */
                if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
                    (sp->slab_later_count == 0)) {
                        mutex_exit(&cp->cache_lock);
                        return;
                }

                kmem_slab_move_yes(cp, sp, buf);
                ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
                sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
                mutex_exit(&cp->cache_lock);
                /* see kmem_move_buffers() about dropping the lock */
                (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
                mutex_enter(&cp->cache_lock);
                ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
                sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
                if (sp->slab_refcnt == 0) {
                        list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
                        list_remove(deadlist, sp);

                        if (!avl_is_empty(
                            &cp->cache_defrag->kmd_moves_pending)) {
                                list_insert_head(deadlist, sp);
                                mutex_exit(&cp->cache_lock);
                                return;
                        }

                        cp->cache_defrag->kmd_deadcount--;
                        cp->cache_slab_destroy++;
                        mutex_exit(&cp->cache_lock);
                        kmem_slab_destroy(cp, sp);
                        return;
                }
        } else {
                kmem_slab_move_yes(cp, sp, buf);
        }
        mutex_exit(&cp->cache_lock);
}

void
kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
{
        kmem_move_notify_args_t *args;

        args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
        if (args != NULL) {
                args->kmna_cache = cp;
                args->kmna_buf = buf;
                if (!taskq_dispatch(kmem_taskq,
                    (task_func_t *)kmem_cache_move_notify_task, args,
                    TQ_NOSLEEP))
                        kmem_free(args, sizeof (kmem_move_notify_args_t));
        }
}
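
/*
 * A client-side sketch of using the notification hook above (object_t,
 * object_cache, and object_rele() are hypothetical, in the style of the
 * examples in the big theory statement): a client that has just made a
 * buffer movable again may prod the allocator rather than wait for the
 * next periodic scan:
 *
 *      void
 *      object_rele(object_t *op)
 *      {
 *              mutex_exit(&op->o_lock);
 *              kmem_cache_move_notify(object_cache, op);
 *      }
 */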

static void
kmem_cache_defrag(kmem_cache_t *cp)
{
        size_t n;

        ASSERT(cp->cache_defrag != NULL);

        mutex_enter(&cp->cache_lock);
        n = avl_numnodes(&cp->cache_partial_slabs);
        if (n > 1) {
                /* kmem_move_buffers() drops and reacquires cache_lock */
                cp->cache_defrag->kmd_defrags++;
                (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
        }
        mutex_exit(&cp->cache_lock);
}

/* Is this cache above the fragmentation threshold? */
static boolean_t
kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
{
        /*
         *       nfree          kmem_frag_numer
         * ------------------ > ---------------
         * cp->cache_buftotal   kmem_frag_denom
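         *
         * For example, assuming illustrative settings of kmem_frag_numer = 1
         * and kmem_frag_denom = 8, a cache with cache_buftotal == 1024 is
         * over the threshold once more than 128 buffers sit free in the slab
         * layer: 129 * 8 > 1024 * 1.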
         */
        return ((nfree * kmem_frag_denom) >
            (cp->cache_buftotal * kmem_frag_numer));
}

static boolean_t
kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
{
        boolean_t fragmented;
        uint64_t nfree;

        ASSERT(MUTEX_HELD(&cp->cache_lock));
        *doreap = B_FALSE;

        if (kmem_move_fulltilt) {
                if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
                        return (B_TRUE);
                }
        } else {
                if ((cp->cache_complete_slab_count + avl_numnodes(
                    &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
                        return (B_FALSE);
                }
        }

        nfree = cp->cache_bufslab;
        fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
            kmem_cache_frag_threshold(cp, nfree));

        /*
         * Free buffers in the magazine layer appear allocated from the point
         * of view of the slab layer. We want to know if the slab layer would
         * appear fragmented if we included free buffers from magazines that
         * have fallen out of the working set.
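         *
         * For example (hypothetical numbers): if the depot could spare three
         * full magazines of 15 rounds each, the threshold below is retested
         * with nfree + 3 * 15 = nfree + 45.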
         */
        if (!fragmented) {
                long reap;

                mutex_enter(&cp->cache_depot_lock);
                reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
                reap = MIN(reap, cp->cache_full.ml_total);
                mutex_exit(&cp->cache_depot_lock);

                nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
                if (kmem_cache_frag_threshold(cp, nfree)) {
                        *doreap = B_TRUE;
                }
        }

        return (fragmented);
}

/* Called periodically from kmem_taskq */
static void
kmem_cache_scan(kmem_cache_t *cp)
{
        boolean_t reap = B_FALSE;
        kmem_defrag_t *kmd;

        ASSERT(taskq_member(kmem_taskq, curthread));

        mutex_enter(&cp->cache_lock);

        kmd = cp->cache_defrag;
        if (kmd->kmd_consolidate > 0) {
                kmd->kmd_consolidate--;
                mutex_exit(&cp->cache_lock);
                kmem_cache_reap(cp);
                return;
        }

        if (kmem_cache_is_fragmented(cp, &reap)) {
                int slabs_found; /* signed: kmem_move_buffers() may return -1 */

                /*
                 * Consolidate reclaimable slabs from the end of the partial
                 * slab list (scan at most kmem_reclaim_scan_range slabs to
                 * find reclaimable slabs). Keep track of how many candidate
                 * slabs we looked for and how many we actually found so we
                 * can adjust the definition of a candidate slab if we're
                 * having trouble finding them.
                 *
                 * kmem_move_buffers() drops and reacquires cache_lock.
                 */
                kmd->kmd_scans++;
                slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
                    kmem_reclaim_max_slabs, 0);
                if (slabs_found >= 0) {
                        kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
                        kmd->kmd_slabs_found += slabs_found;
                }

                if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
                        kmd->kmd_tries = 0;

                        /*
                         * If candidates were easy to find in previous scans
                         * (every slab sought was found), tighten the
                         * threshold so that only sparser slabs qualify; if we
                         * had difficulty finding candidates (fewer than half
                         * of those sought), relax the threshold so that
                         * candidates are easier to find.
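                         *
                         * For example (hypothetical numbers): if 20 slabs
                         * were sought over the last several scans but only 6
                         * were found, then 6 * 2 < 20 and the threshold is
                         * relaxed one step; had all 20 been found, it would
                         * have been tightened one step instead.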
                         */
                        if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
                                kmem_adjust_reclaim_threshold(kmd, -1);
                        } else if ((kmd->kmd_slabs_found * 2) <
                            kmd->kmd_slabs_sought) {
                                kmem_adjust_reclaim_threshold(kmd, 1);
                        }
                        kmd->kmd_slabs_sought = 0;
                        kmd->kmd_slabs_found = 0;
                }
        } else {
                kmem_reset_reclaim_threshold(cp->cache_defrag);
#ifdef  DEBUG
                if (!avl_is_empty(&cp->cache_partial_slabs)) {
                        /*
                         * In a debug kernel we want the consolidator to
                         * run occasionally even when there is plenty of
                         * memory.
                         */
                        uint16_t debug_rand;

                        (void) random_get_bytes((uint8_t *)&debug_rand, 2);
                        if (!kmem_move_noreap &&
                            ((debug_rand % kmem_mtb_reap) == 0)) {
                                mutex_exit(&cp->cache_lock);
                                kmem_cache_reap(cp);
                                return;
                        } else if ((debug_rand % kmem_mtb_move) == 0) {
                                kmd->kmd_scans++;
                                (void) kmem_move_buffers(cp,
                                    kmem_reclaim_scan_range, 1, KMM_DEBUG);
                        }
                }
#endif  /* DEBUG */
        }

        mutex_exit(&cp->cache_lock);

        if (reap)
                kmem_depot_ws_reap(cp);
}