/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2018, Joyent, Inc.
 */

/*
 * Kernel memory allocator, as described in the following two papers and a
 * statement about the consolidator:
 *
 * Jeff Bonwick,
 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
 * Proceedings of the Summer 1994 Usenix Conference.
 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
 *
 * Jeff Bonwick and Jonathan Adams,
 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
 * Arbitrary Resources.
 * Proceedings of the 2001 Usenix Conference.
 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
 *
 * kmem Slab Consolidator Big Theory Statement:
 *
 * 1. Motivation
 *
 * As stated in Bonwick94, slabs provide the following advantages over other
 * allocation structures in terms of memory fragmentation:
 *
 *  - Internal fragmentation (per-buffer wasted space) is minimal.
 *  - Severe external fragmentation (unused buffers on the free list) is
 *    unlikely.
 *
 * Segregating objects by size eliminates one source of external fragmentation,
 * and according to Bonwick:
 *
 *   The other reason that slabs reduce external fragmentation is that all
 *   objects in a slab are of the same type, so they have the same lifetime
 *   distribution. The resulting segregation of short-lived and long-lived
 *   objects at slab granularity reduces the likelihood of an entire page being
 *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
 *
 * While unlikely, severe external fragmentation remains possible. Clients that
 * allocate both short- and long-lived objects from the same cache cannot
 * anticipate the distribution of long-lived objects within the allocator's slab
 * implementation. Even a small percentage of long-lived objects distributed
 * randomly across many slabs can lead to a worst case scenario where the client
 * frees the majority of its objects and the system gets back almost none of the
 * slabs. Despite the client doing what it reasonably can to help the system
 * reclaim memory, the allocator cannot shake free enough slabs because of
 * lonely allocations stubbornly hanging on. Although the allocator is in a
 * position to diagnose the fragmentation, there is nothing that the allocator
 * by itself can do about it. It only takes a single allocated object to prevent
 * an entire slab from being reclaimed, and any object handed out by
 * kmem_cache_alloc() is by definition in the client's control. Conversely,
 * although the client is in a position to move a long-lived object, it has no
 * way of knowing if the object is causing fragmentation, and if so, where to
 * move it. A solution necessarily requires further cooperation between the
 * allocator and the client.
 *
 * 2. Move Callback
 *
 * The kmem slab consolidator therefore adds a move callback to the
 * allocator/client interface, improving worst-case external fragmentation in
 * kmem caches that supply a function to move objects from one memory location
 * to another. In a situation of low memory, kmem attempts to consolidate all of
 * a cache's slabs at once; otherwise it works slowly to bring external
 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
 * thereby helping to avoid a low memory situation in the future.
 *
 * The callback has the following signature:
 *
 *   kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
 *
 * It supplies the kmem client with two addresses: the allocated object that
 * kmem wants to move and a buffer selected by kmem for the client to use as the
 * copy destination. The callback is kmem's way of saying "Please get off of
 * this buffer and use this one instead." kmem knows where it wants to move the
 * object in order to best reduce fragmentation. All the client needs to know
 * about the second argument (void *new) is that it is an allocated, constructed
 * object ready to take the contents of the old object. When the move function
 * is called, the system is likely to be low on memory, and the new object
 * spares the client from having to worry about allocating memory for the
 * requested move. The third argument supplies the size of the object, in case a
 * single move function handles multiple caches whose objects differ only in
 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
 * user argument passed to the constructor, destructor, and reclaim functions is
 * also passed to the move callback.
 *
 * 2.1 Setting the Move Callback
 *
 * The client sets the move callback after creating the cache and before
 * allocating from it:
 *
 *      object_cache = kmem_cache_create(...);
 *      kmem_cache_set_move(object_cache, object_move);
 *
 * 2.2 Move Callback Return Values
 *
 * Only the client knows about its own data and when is a good time to move it.
 * The client is cooperating with kmem to return unused memory to the system,
 * and kmem respectfully accepts this help at the client's convenience. When
 * asked to move an object, the client can respond with any of the following:
 *
 *   typedef enum kmem_cbrc {
 *           KMEM_CBRC_YES,
 *           KMEM_CBRC_NO,
 *           KMEM_CBRC_LATER,
 *           KMEM_CBRC_DONT_NEED,
 *           KMEM_CBRC_DONT_KNOW
 *   } kmem_cbrc_t;
 *
 * The client must not explicitly kmem_cache_free() either of the objects passed
 * to the callback, since kmem wants to free them directly to the slab layer
 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
 * objects to free:
 *
 *       YES: (Did it) The client moved the object, so kmem frees the old one.
 *        NO: (Never) The client refused, so kmem frees the new object (the
 *            unused copy destination). kmem also marks the slab of the old
 *            object so as not to bother the client with further callbacks for
 *            that object as long as the slab remains on the partial slab list.
 *            (The system won't be getting the slab back as long as the
 *            immovable object holds it hostage, so there's no point in moving
 *            any of its objects.)
 *     LATER: The client is using the object and cannot move it now, so kmem
 *            frees the new object (the unused copy destination). kmem still
 *            attempts to move other objects off the slab, since it expects to
 *            succeed in clearing the slab in a later callback. The client
 *            should use LATER instead of NO if the object is likely to become
 *            movable very soon.
 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
 *            with the new object (the unused copy destination). This response
 *            is the client's opportunity to be a model citizen and give back as
 *            much as it can.
 * DONT_KNOW: The client does not know about the object because
 *            a) the client has just allocated the object and not yet put it
 *               wherever it expects to find known objects,
 *            b) the client has removed the object from wherever it expects to
 *               find known objects and is about to free it, or
 *            c) the client has freed the object.
 *            In all these cases (a, b, and c) kmem frees the new object (the
 *            unused copy destination).  In the first case, the object is in
 *            use and the correct action is that for LATER; in the latter two
 *            cases, we know that the object is either freed or about to be
 *            freed, in which case it is either already in a magazine or about
 *            to be in one.  In these cases, we know that the object will either
 *            be reallocated and reused, or it will end up in a full magazine
 *            that will be reaped (thereby liberating the slab).  Because it
 *            is prohibitively expensive to differentiate these cases, and
 *            because the defrag code is executed when we're low on memory
 *            (thereby biasing the system to reclaim full magazines) we treat
 *            all DONT_KNOW cases as LATER and rely on cache reaping to
 *            generally clean up full magazines.  While we take the same action
 *            for these cases, we maintain their semantic distinction:  if
 *            defragmentation is not occurring, it is useful to know if this
 *            is due to objects in use (LATER) or objects in an unknown state
 *            of transition (DONT_KNOW).
 *
 * 2.3 Object States
 *
 * Neither kmem nor the client can be assumed to know the object's whereabouts
 * at the time of the callback. An object belonging to a kmem cache may be in
 * any of the following states:
 *
 * 1. Uninitialized on the slab
 * 2. Allocated from the slab but not constructed (still uninitialized)
 * 3. Allocated from the slab, constructed, but not yet ready for business
 *    (not in a valid state for the move callback)
 * 4. In use (valid and known to the client)
 * 5. About to be freed (no longer in a valid state for the move callback)
 * 6. Freed to a magazine (still constructed)
 * 7. Allocated from a magazine, not yet ready for business (not in a valid
 *    state for the move callback), and about to return to state #4
 * 8. Deconstructed on a magazine that is about to be freed
 * 9. Freed to the slab
 *
 * Since the move callback may be called at any time while the object is in any
 * of the above states (except state #1), the client needs a safe way to
 * determine whether or not it knows about the object. Specifically, the client
 * needs to know whether or not the object is in state #4, the only state in
 * which a move is valid. If the object is in any other state, the client should
 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
 * the object's fields.
 *
 * Note that although an object may be in state #4 when kmem initiates the move
 * request, the object may no longer be in that state by the time kmem actually
 * calls the move function. Not only does the client free objects
 * asynchronously, kmem itself puts move requests on a queue where they are
 * pending until kmem processes them from another context. Also, objects freed
 * to a magazine appear allocated from the point of view of the slab layer, so
 * kmem may even initiate requests for objects in a state other than state #4.
 *
 * 2.3.1 Magazine Layer
 *
 * An important insight revealed by the states listed above is that the magazine
 * layer is populated only by kmem_cache_free(). Magazines of constructed
 * objects are never populated directly from the slab layer (which contains raw,
 * unconstructed objects). Whenever an allocation request cannot be satisfied
 * from the magazine layer, the magazines are bypassed and the request is
 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
 * the object constructor only when allocating from the slab layer, and only in
 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
 * the move callback. kmem does not preconstruct objects in anticipation of
 * kmem_cache_alloc().
 *
 * 2.3.2 Object Constructor and Destructor
 *
 * If the client supplies a destructor, it must be valid to call the destructor
 * on a newly created object (immediately after the constructor).
 *
 * 2.4 Recognizing Known Objects
 *
 * There is a simple test to determine safely whether or not the client knows
 * about a given object in the move callback. It relies on the fact that kmem
 * guarantees that the object of the move callback has only been touched by the
 * client itself or else by kmem. kmem does this by ensuring that none of the
 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
 * callback is pending. When the last object on a slab is freed, if there is a
 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
 * slabs on that list until all pending callbacks are completed. That way,
 * clients can be certain that the object of a move callback is in one of the
 * states listed above, making it possible to distinguish known objects (in
 * state #4) using the two low order bits of any pointer member (with the
 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
 * platforms).
 *
 * The test works as long as the client always transitions objects from state #4
 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
 * order bit of the client-designated pointer member. Since kmem only writes
 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
 * guaranteed to set at least one of the two low order bits. Therefore, given an
 * object with a back pointer to a 'container_t *o_container', the client can
 * test
 *
 *      container_t *container = object->o_container;
 *      if ((uintptr_t)container & 0x3) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * Typically, an object will have a pointer to some structure with a list or
 * hash where objects from the cache are kept while in use. Assuming that the
 * client has some way of knowing that the container structure is valid and will
 * not go away during the move, and assuming that the structure includes a lock
 * to protect whatever collection is used, then the client would continue as
 * follows:
 *
 *      // Ensure that the container structure does not go away.
 *      if (container_hold(container) == 0) {
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *      mutex_enter(&container->c_objects_lock);
 *      if (container != object->o_container) {
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *
 * At this point the client knows that the object cannot be freed as long as
 * c_objects_lock is held. Note that after acquiring the lock, the client must
 * recheck the o_container pointer in case the object was removed just before
 * acquiring the lock.
 *
 * When the client is about to free an object, it must first remove that object
 * from the list, hash, or other structure where it is kept. At that time, to
 * mark the object so it can be distinguished from the remaining, known objects,
 * the client sets the designated low order bit:
 *
 *      mutex_enter(&container->c_objects_lock);
 *      object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
 *      list_remove(&container->c_objects, object);
 *      mutex_exit(&container->c_objects_lock);
 *
 * In the common case, the object is freed to the magazine layer, where it may
 * be reused on a subsequent allocation without the overhead of calling the
 * constructor. While in the magazine it appears allocated from the point of
 * view of the slab layer, making it a candidate for the move callback. Most
 * objects unrecognized by the client in the move callback fall into this
 * category and are cheaply distinguished from known objects by the test
 * described earlier. Because searching magazines is prohibitively expensive
 * for kmem, clients that do not mark freed objects (and therefore return
 * KMEM_CBRC_DONT_KNOW for large numbers of objects) may find defragmentation
 * efficacy reduced.
 *
 * Invalidating the designated pointer member before freeing the object marks
 * the object to be avoided in the callback, and conversely, assigning a valid
 * value to the designated pointer member after allocating the object makes the
 * object fair game for the callback:
 *
 *      ... allocate object ...
 *      ... set any initial state not set by the constructor ...
 *
 *      mutex_enter(&container->c_objects_lock);
 *      list_insert_tail(&container->c_objects, object);
 *      membar_producer();
 *      object->o_container = container;
 *      mutex_exit(&container->c_objects_lock);
 *
 * Note that everything else must be valid before setting o_container makes the
 * object fair game for the move callback. The membar_producer() call ensures
 * that all the object's state is written to memory before setting the pointer
 * that transitions the object from state #3 or #7 (allocated, constructed, not
 * yet in use) to state #4 (in use, valid). That's important because the move
 * function has to check the validity of the pointer before it can safely
 * acquire the lock protecting the collection where it expects to find known
 * objects.
 *
 * This method of distinguishing known objects observes the usual symmetry:
 * invalidating the designated pointer is the first thing the client does before
 * freeing the object, and setting the designated pointer is the last thing the
 * client does after allocating the object. Of course, the client is not
 * required to use this method. Fundamentally, how the client recognizes known
 * objects is completely up to the client, but this method is recommended as an
 * efficient and safe way to take advantage of the guarantees made by kmem. If
 * the entire object is arbitrary data without any markable bits from a suitable
 * pointer member, then the client must find some other method, such as
 * searching a hash table of known objects.
 *
 * 2.5 Preventing Objects From Moving
 *
 * Besides a way to distinguish known objects, the other thing that the client
 * needs is a strategy to ensure that an object will not move while the client
 * is actively using it. The details of satisfying this requirement tend to be
 * highly cache-specific. It might seem that the same rules that let a client
 * remove an object safely should also decide when an object can be moved
 * safely. However, any object state that makes a removal attempt invalid is
 * likely to be long-lasting for objects that the client does not expect to
 * remove. kmem knows nothing about the object state and is equally likely (from
 * the client's point of view) to request a move for any object in the cache,
 * whether prepared for removal or not. Even a low percentage of objects stuck
 * in place by unremovability will defeat the consolidator if the stuck objects
 * are the same long-lived allocations likely to hold slabs hostage.
 * Fundamentally, the consolidator is not aimed at common cases. Severe external
 * fragmentation is a worst case scenario manifested as sparsely allocated
 * slabs, by definition a low percentage of the cache's objects. When deciding
 * what makes an object movable, keep in mind the goal of the consolidator: to
 * bring worst-case external fragmentation within the limits guaranteed for
 * internal fragmentation. Removability is a poor criterion if it is likely to
 * exclude more than an insignificant percentage of objects for long periods of
 * time.
 *
 * A tricky general solution exists, and it has the advantage of letting you
 * move any object at almost any moment, practically eliminating the likelihood
 * that an object can hold a slab hostage. However, if there is a cache-specific
 * way to ensure that an object is not actively in use in the vast majority of
 * cases, a simpler solution that leverages this cache-specific knowledge is
 * preferred.
 *
 * 2.5.1 Cache-Specific Solution
 *
 * As an example of a cache-specific solution, the ZFS znode cache takes
 * advantage of the fact that the vast majority of znodes are only being
 * referenced from the DNLC. (A typical case might be a few hundred in active
 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
 * client has established that it recognizes the znode and can access its fields
 * safely (using the method described earlier), it then tests whether the znode
 * is referenced by anything other than the DNLC. If so, it assumes that the
 * znode may be in active use and is unsafe to move, so it drops its locks and
 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
 * else znodes are used, no change is needed to protect against the possibility
 * of the znode moving. The disadvantage is that it remains possible for an
 * application to hold a znode slab hostage with an open file descriptor.
 * However, this case ought to be rare and the consolidator has a way to deal
 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
 * object, kmem eventually stops believing it and treats the slab as if the
 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
 * then focus on getting it off of the partial slab list by allocating rather
 * than freeing all of its objects. (Either way of getting a slab off the
 * free list reduces fragmentation.)
 *
 * 2.5.2 General Solution
 *
 * The general solution, on the other hand, requires an explicit hold everywhere
 * the object is used to prevent it from moving. To keep the client locking
 * strategy as uncomplicated as possible, kmem guarantees the simplifying
 * assumption that move callbacks are sequential, even across multiple caches.
 * Internally, a global queue processed by a single thread supports all caches
 * implementing the callback function. No matter how many caches supply a move
 * function, the consolidator never moves more than one object at a time, so the
 * client does not have to worry about tricky lock ordering involving several
 * related objects from different kmem caches.
 *
 * The general solution implements the explicit hold as a read-write lock, which
 * allows multiple readers to access an object from the cache simultaneously
 * while a single writer is excluded from moving it. A single rwlock for the
 * entire cache would lock out all threads from using any of the cache's objects
 * even though only a single object is being moved, so to reduce contention,
 * the client can fan out the single rwlock into an array of rwlocks hashed by
 * the object address, making it probable that moving one object will not
 * prevent other threads from using a different object. The rwlock cannot be a
 * member of the object itself, because the possibility of the object moving
 * makes it unsafe to access any of the object's fields until the lock is
 * acquired.
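 *
 * For example, the OBJECT_RWLOCK macro used in the examples below might be
 * implemented along these lines (a minimal sketch; the array size and hash
 * shift are illustrative assumptions, not part of kmem):
 *
 *      #define OBJECT_HASH_SIZE 64     // assumed; any power of two works
 *      static krwlock_t object_rwlock[OBJECT_HASH_SIZE];
 *      #define OBJECT_RWLOCK(op)                                       \
 *              (&object_rwlock[((uintptr_t)(op) >> 3) &                \
 *              (OBJECT_HASH_SIZE - 1)])
 *
 * Shifting the address right discards the low bits that are identical for all
 * objects due to alignment, spreading objects more evenly across the lock
 * array.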
 *
 * Assuming a small, fixed number of locks, it's possible that multiple objects
 * will hash to the same lock. A thread that needs to use multiple objects in
 * the same function may acquire the same lock multiple times. Since rwlocks are
 * reentrant for readers, and since there is never more than a single writer at
 * a time (assuming that the client acquires the lock as a writer only when
 * moving an object inside the callback), there would seem to be no problem.
 * However, a client locking multiple objects in the same function must handle
 * one case of potential deadlock: Assume that thread A needs to prevent both
 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
 * same lock, that thread A will acquire the lock for object 1 as a reader
 * before thread B sets the lock's write-wanted bit, preventing thread A from
 * reacquiring the lock for object 2 as a reader. Unable to make forward
 * progress, thread A will never release the lock for object 1, resulting in
 * deadlock.
 *
 * There are two ways of avoiding the deadlock just described. The first is to
 * use rw_tryenter() rather than rw_enter() in the callback function when
 * attempting to acquire the lock as a writer. If tryenter discovers that the
 * same object (or another object hashed to the same lock) is already in use, it
 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
 * since it allows a thread to acquire the lock as a reader in spite of a
 * waiting writer. This second approach insists on moving the object now, no
 * matter how many readers the move function must wait for in order to do so,
 * and could delay the completion of the callback indefinitely (blocking
 * callbacks to other clients). In practice, a less insistent callback using
 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
 * little reason to use anything else.
 *
 * Avoiding deadlock is not the only problem that an implementation using an
 * explicit hold needs to solve. Locking the object in the first place (to
 * prevent it from moving) remains a problem, since the object could move
 * between the time you obtain a pointer to the object and the time you acquire
 * the rwlock hashed to that pointer value. Therefore the client needs to
 * recheck the value of the pointer after acquiring the lock, drop the lock if
 * the value has changed, and try again. This requires a level of indirection:
 * something that points to the object rather than the object itself, that the
 * client can access safely while attempting to acquire the lock. (The object
 * itself cannot be referenced safely because it can move at any time.)
 * The following lock-acquisition function takes whatever is safe to reference
 * (arg), follows its pointer to the object (using function f), and tries as
 * often as necessary to acquire the hashed lock and verify that the object
 * still has not moved:
 *
 *      object_t *
 *      object_hold(object_f f, void *arg)
 *      {
 *              object_t *op;
 *
 *              op = f(arg);
 *              if (op == NULL) {
 *                      return (NULL);
 *              }
 *
 *              rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              while (op != f(arg)) {
 *                      rw_exit(OBJECT_RWLOCK(op));
 *                      op = f(arg);
 *                      if (op == NULL) {
 *                              break;
 *                      }
 *                      rw_enter(OBJECT_RWLOCK(op), RW_READER);
 *              }
 *
 *              return (op);
 *      }
 *
 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
 * lock reacquisition loop, while necessary, almost never executes. The function
 * pointer f (used to obtain the object pointer from arg) has the following type
 * definition:
 *
 *      typedef object_t *(*object_f)(void *arg);
 *
 * An object_f implementation is likely to be as simple as accessing a structure
 * member:
 *
 *      object_t *
 *      s_object(void *arg)
 *      {
 *              something_t *sp = arg;
 *              return (sp->s_object);
 *      }
 *
 * The flexibility of a function pointer allows the path to the object to be
 * arbitrarily complex and also supports the notion that depending on where you
 * are using the object, you may need to get it from someplace different.
 *
 * The function that releases the explicit hold is simpler because it does not
 * have to worry about the object moving:
 *
 *      void
 *      object_rele(object_t *op)
 *      {
 *              rw_exit(OBJECT_RWLOCK(op));
 *      }
 *
 * The caller is spared these details so that obtaining and releasing an
 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
 * of object_hold() only needs to know that the returned object pointer is valid
 * if not NULL and that the object will not move until released.
 *
 * Although object_hold() prevents an object from moving, it does not prevent it
 * from being freed. The caller must take measures before calling object_hold()
 * (afterwards is too late) to ensure that the held object cannot be freed. The
 * caller must do so without accessing the unsafe object reference, so any lock
 * or reference count used to ensure the continued existence of the object must
 * live outside the object itself.
 *
 * Obtaining a new object is a special case where an explicit hold is impossible
 * for the caller. Any function that returns a newly allocated object (either as
 * a return value, or as an in-out parameter) must return it already held; after
 * the caller gets it is too late, since the object cannot be safely accessed
 * without the level of indirection described earlier. The following
 * object_alloc() example uses the same code shown earlier to transition a new
 * object into the state of being recognized (by the client) as a known object.
 * The function must acquire the hold (rw_enter) before that state transition
 * makes the object movable:
 *
 *      static object_t *
 *      object_alloc(container_t *container)
 *      {
 *              object_t *object = kmem_cache_alloc(object_cache, 0);
 *              ... set any initial state not set by the constructor ...
 *              rw_enter(OBJECT_RWLOCK(object), RW_READER);
 *              mutex_enter(&container->c_objects_lock);
 *              list_insert_tail(&container->c_objects, object);
 *              membar_producer();
 *              object->o_container = container;
 *              mutex_exit(&container->c_objects_lock);
 *              return (object);
 *      }
 *
 * Functions that implicitly acquire an object hold (any function that calls
 * object_alloc() to supply an object for the caller) need to be carefully noted
 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
 * prevent all objects hashed to the affected rwlocks from ever being moved.
 *
 * The pointer to a held object can be hashed to the holding rwlock even after
 * the object has been freed. Although it is possible to release the hold
 * after freeing the object, you may decide to release the hold implicitly in
 * whatever function frees the object, so as to release the hold as soon as
 * possible, and for the sake of symmetry with the function that implicitly
 * acquires the hold when it allocates the object. Here, object_free() releases
 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
 * matching pair with object_hold():
 *
 *      void
 *      object_free(object_t *object)
 *      {
 *              container_t *container;
 *
 *              ASSERT(object_held(object));
 *              container = object->o_container;
 *              mutex_enter(&container->c_objects_lock);
 *              object->o_container =
 *                  (void *)((uintptr_t)object->o_container | 0x1);
 *              list_remove(&container->c_objects, object);
 *              mutex_exit(&container->c_objects_lock);
 *              object_rele(object);
 *              kmem_cache_free(object_cache, object);
 *      }
 *
 * Note that object_free() cannot safely accept an object pointer as an argument
 * unless the object is already held. Any function that calls object_free()
 * needs to be carefully noted since it similarly forms a matching pair with
 * object_hold().
 *
 * To complete the picture, the following callback function implements the
 * general solution by moving objects only if they are currently unheld:
 *
 *      static kmem_cbrc_t
 *      object_move(void *buf, void *newbuf, size_t size, void *arg)
 *      {
 *              object_t *op = buf, *np = newbuf;
 *              container_t *container;
 *
 *              container = op->o_container;
 *              if ((uintptr_t)container & 0x3) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              // Ensure that the container structure does not go away.
 *              if (container_hold(container) == 0) {
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              mutex_enter(&container->c_objects_lock);
 *              if (container != op->o_container) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_DONT_KNOW);
 *              }
 *
 *              if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
 *                      mutex_exit(&container->c_objects_lock);
 *                      container_rele(container);
 *                      return (KMEM_CBRC_LATER);
 *              }
 *
 *              object_move_impl(op, np); // critical section
 *              rw_exit(OBJECT_RWLOCK(op));
 *
 *              op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
 *              list_link_replace(&op->o_link_node, &np->o_link_node);
 *              mutex_exit(&container->c_objects_lock);
 *              container_rele(container);
 *              return (KMEM_CBRC_YES);
 *      }
 *
 * Note that object_move() must invalidate the designated o_container pointer of
 * the old object in the same way that object_free() does, since kmem will free
 * the object in response to the KMEM_CBRC_YES return value.
 *
 * The lock order in object_move() differs from object_alloc(), which locks
 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
 * not a problem. Holding the lock on the object list in the example above
 * through the entire callback not only prevents the object from going away, it
 * also allows you to lock the list elsewhere and know that none of its elements
 * will move during iteration.
 *
 * Adding an explicit hold everywhere an object from the cache is used is tricky
 * and involves much more change to client code than a cache-specific solution
 * that leverages existing state to decide whether or not an object is
 * movable. However, this approach has the advantage that no object remains
 * immovable for any significant length of time, making it extremely unlikely
 * that long-lived allocations can continue holding slabs hostage; and it works
 * for any cache.
 *
 * 3. Consolidator Implementation
 *
 * Once the client supplies a move function that a) recognizes known objects and
 * b) avoids moving objects that are actively in use, the remaining work is up
 * to the consolidator to decide which objects to move and when to issue
 * callbacks.
 *
 * The consolidator relies on the fact that a cache's slabs are ordered by
 * usage. Each slab has a fixed number of objects. Depending on the slab's
 * "color" (the offset of the first object from the beginning of the slab;
 * offsets are staggered to mitigate false sharing of cache lines) it is either
 * the maximum number of objects per slab determined at cache creation time or
 * else the number closest to the maximum that fits within the space remaining
 * after the initial offset. A completely allocated slab may contribute some
 * internal fragmentation (per-slab overhead) but no external fragmentation, so
 * it is of no interest to the consolidator. At the other extreme, slabs whose
 * objects have all been freed to the slab are released to the virtual memory
 * (VM) subsystem (objects freed to magazines are still allocated as far as the
 * slab is concerned). External fragmentation exists when there are slabs
 * somewhere between these extremes. A partial slab has at least one but not all
 * of its objects allocated. The more partial slabs, and the fewer allocated
 * objects on each of them, the higher the fragmentation. Hence the
 * consolidator's overall strategy is to reduce the number of partial slabs by
 * moving allocated objects from the least allocated slabs to the most allocated
 * slabs.
 *
 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
 * slabs are kept separately in an unordered list. Since the majority of slabs
 * tend to be completely allocated (a typical unfragmented cache may have
 * thousands of complete slabs and only a single partial slab), separating
 * complete slabs improves the efficiency of partial slab ordering, since the
 * complete slabs do not affect the depth or balance of the AVL tree. This
 * ordered sequence of partial slabs acts as a "free list" supplying objects for
 * allocation requests.
 *
 * Objects are always allocated from the first partial slab in the free list,
 * where the allocation is most likely to eliminate a partial slab (by
 * completely allocating it). Conversely, when a single object from a completely
 * allocated slab is freed to the slab, that slab is added to the front of the
 * free list. Since most free list activity involves highly allocated slabs
 * coming and going at the front of the list, slabs tend naturally toward the
 * ideal order: highly allocated at the front, sparsely allocated at the back.
 * Slabs with few allocated objects are likely to become completely free if they
 * keep a safe distance away from the front of the free list. Slab misorders
 * interfere with the natural tendency of slabs to become completely free or
 * completely allocated. For example, a slab with a single allocated object
 * needs only a single free to escape the cache; its natural desire is
 * frustrated when it finds itself at the front of the list where a second
 * allocation happens just before the free could have released it. Another slab
 * with all but one object allocated might have supplied the buffer instead, so
 * that both (as opposed to neither) of the slabs would have been taken off the
 * free list.
 *
 * Although slabs tend naturally toward the ideal order, misorders allowed by a
 * simple list implementation defeat the consolidator's strategy of merging
 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
 * needs another way to fix misorders to optimize its callback strategy. One
 * approach is to periodically scan a limited number of slabs, advancing a
 * marker to hold the current scan position, and to move extreme misorders to
 * the front or back of the free list and to the front or back of the current
 * scan range. By making consecutive scan ranges overlap by one slab, the least
 * allocated slab in the current range can be carried along from the end of one
 * scan to the start of the next.
 *
 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
 * task, however. Since most of the cache's activity is in the magazine layer,
 * and allocations from the slab layer represent only a startup cost, the
 * overhead of maintaining a balanced tree is not a significant concern compared
 * to the opportunity of reducing complexity by eliminating the partial slab
 * scanner just described. The overhead of an AVL tree is minimized by
 * maintaining only partial slabs in the tree and keeping completely allocated
 * slabs separately in a list. To avoid increasing the size of the slab
 * structure the AVL linkage pointers are reused for the slab's list linkage,
 * since the slab will always be either partial or complete, never stored both
 * ways at the same time. To further minimize the overhead of the AVL tree the
 * compare function that orders partial slabs by usage divides the range of
 * allocated object counts into bins such that counts within the same bin are
 * considered equal. Binning partial slabs makes it less likely that allocating
 * or freeing a single object will change the slab's order, requiring a tree
 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
 * requiring some rebalancing of the tree). Allocation counts closest to
 * completely free and completely allocated are left unbinned (finely sorted) to
 * better support the consolidator's strategy of merging slabs at either
 * extreme.
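 *
 * The following sketch conveys the idea of the binned comparison (simplified;
 * the slab_t fields, the fixed bin shift, and the omission of the unbinned
 * extremes are all illustrative assumptions, not the actual kmem code):
 *
 *      // Order partial slabs by binned usage, most allocated first, with
 *      // ties broken by address so the AVL ordering is total.
 *      static int
 *      partial_slab_cmp(const void *l, const void *r)
 *      {
 *              const slab_t *s0 = l;
 *              const slab_t *s1 = r;
 *              int b0 = s0->s_refcnt >> 4;     // assumed bin width of 16
 *              int b1 = s1->s_refcnt >> 4;
 *
 *              if (b0 > b1)
 *                      return (-1);            // more allocated sorts first
 *              if (b0 < b1)
 *                      return (1);
 *              if ((uintptr_t)s0 < (uintptr_t)s1)
 *                      return (-1);
 *              return ((uintptr_t)s0 > (uintptr_t)s1);
 *      }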
 *
 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
 *
 * The consolidator piggybacks on the kmem maintenance thread and is called on
 * the same interval as kmem_cache_update(), once per cache every fifteen
 * seconds. kmem maintains a running count of unallocated objects in the slab
 * layer (cache_bufslab). The consolidator checks whether that number exceeds
 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
 * there is a significant number of slabs in the cache (arbitrarily a minimum
 * of 101 total slabs). Unused objects that have fallen out of the magazine
 * layer's working set are included in the assessment, and magazines in the
 * depot are reaped if those objects would lift cache_bufslab above the
 * fragmentation threshold. Once the consolidator decides that a cache is
 * fragmented, it looks for a candidate slab to reclaim, starting at the end of
 * the partial slab free list and scanning backwards. At first the consolidator
 * is choosy: only a slab with fewer than 12.5% (1/8) of its objects allocated
 * qualifies (or else a single allocated object, regardless of percentage). If
 * there is difficulty finding a candidate slab, kmem raises the allocation
 * threshold incrementally, up to a maximum 87.5% (7/8), so that eventually the
 * consolidator will reduce external fragmentation (unused objects on the free
 * list) below 12.5% (1/8), even in the worst case of every slab in the cache
 * being almost 7/8 allocated. The threshold can also be lowered incrementally
 * when candidate slabs are easy to find, and the threshold is reset to the
 * minimum 1/8 as soon as the cache is no longer fragmented.
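 *
 * In rough terms, the initial fragmentation test amounts to the following
 * (a sketch only; the function and parameter names are illustrative, not
 * actual kmem code):
 *
 *      // Fragmented if slab-layer free buffers exceed 1/8 of all buffers
 *      // and the cache is large enough to be worth consolidating.
 *      static boolean_t
 *      cache_is_fragmented(uint64_t bufslab, uint64_t buftotal,
 *          uint64_t slab_count)
 *      {
 *              return (slab_count >= 101 && bufslab > (buftotal >> 3));
 *      }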
 *
 * 3.2 Generating Callbacks
 *
 * Once an eligible slab is chosen, a callback is generated for every allocated
 * object on the slab, in the hope that the client will move everything off the
 * slab and make it reclaimable. Objects selected as move destinations are
 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
 * order (most allocated at the front, least allocated at the back) and a
 * cooperative client, the consolidator will succeed in removing slabs from both
 * ends of the free list, completely allocating on the one hand and completely
 * freeing on the other. Objects selected as move destinations are allocated in
 * the kmem maintenance thread where move requests are enqueued. A separate
 * callback thread removes pending callbacks from the queue and calls the
 * client. The separate thread ensures that client code (the move function) does
 * not interfere with internal kmem maintenance tasks. A map of pending
 * callbacks keyed by object address (the object to be moved) is checked to
 * ensure that duplicate callbacks are not generated for the same object.
 * Allocating the move destination (the object to move to) prevents subsequent
 * callbacks from selecting the same destination as an earlier pending callback.
 *
 * Move requests can also be generated by kmem_cache_reap() when the system is
 * desperate for memory and by kmem_cache_move_notify(), called by the client to
 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
 * The map of pending callbacks is protected by the same lock that protects the
 * slab layer.
 *
 * When the system is desperate for memory, kmem does not bother to determine
 * whether or not the cache exceeds the fragmentation threshold, but tries to
 * consolidate as many slabs as possible. Normally, the consolidator chews
 * slowly, one sparsely allocated slab at a time during each maintenance
 * interval that the cache is fragmented. When desperate, the consolidator
 * starts at the last partial slab and enqueues callbacks for every allocated
 * object on every partial slab, working backwards until it reaches the first
 * partial slab. The first partial slab, meanwhile, advances in pace with the
 * consolidator as allocations to supply move destinations for the enqueued
 * callbacks use up the highly allocated slabs at the front of the free list.
 * Ideally, the overgrown free list collapses like an accordion, starting at
 * both ends and ending at the center with a single partial slab.
 *
 * 3.3 Client Responses
 *
 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
 * marks the slab that supplied the stuck object non-reclaimable and moves it to
 * the front of the free list. The slab remains marked as long as it remains on
 * the free list, and it appears more allocated to the partial slab compare
 * function than any unmarked slab, no matter how many of its objects are
 * allocated. Since even one immovable object ties up the entire slab, the goal
 * is to completely allocate any slab that cannot be completely freed. kmem does
 * not bother generating callbacks to move objects from a marked slab unless the
 * system is desperate.
 *
 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
 * slab. If the client responds LATER too many times, kmem disbelieves and
 * treats the response as a NO. The count is cleared when the slab is taken off
 * the partial slab list or when the client moves one of the slab's objects.
 *
 * 4. Observability
 *
 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
 * the ::kmem_slabs dcmd. For a complete description of the command, enter
 * '::help kmem_slabs' at the mdb prompt.
 */

#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/tuneable.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/mutex.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/kobj.h>
#include <sys/disp.h>
#include <vm/seg_kmem.h>
#include <sys/log.h>
#include <sys/callb.h>
#include <sys/taskq.h>
#include <sys/modctl.h>
#include <sys/reboot.h>
#include <sys/id32.h>
#include <sys/zone.h>
#include <sys/netstack.h>
#ifdef  DEBUG
#include <sys/random.h>
#endif

extern void streams_msg_init(void);
extern int segkp_fromheap;
extern void segkp_cache_free(void);
extern int callout_init_done;

struct kmem_cache_kstat {
        kstat_named_t   kmc_buf_size;
        kstat_named_t   kmc_align;
        kstat_named_t   kmc_chunk_size;
        kstat_named_t   kmc_slab_size;
        kstat_named_t   kmc_alloc;
        kstat_named_t   kmc_alloc_fail;
        kstat_named_t   kmc_free;
        kstat_named_t   kmc_depot_alloc;
        kstat_named_t   kmc_depot_free;
        kstat_named_t   kmc_depot_contention;
        kstat_named_t   kmc_slab_alloc;
        kstat_named_t   kmc_slab_free;
        kstat_named_t   kmc_buf_constructed;
        kstat_named_t   kmc_buf_avail;
        kstat_named_t   kmc_buf_inuse;
        kstat_named_t   kmc_buf_total;
        kstat_named_t   kmc_buf_max;
        kstat_named_t   kmc_slab_create;
        kstat_named_t   kmc_slab_destroy;
        kstat_named_t   kmc_vmem_source;
        kstat_named_t   kmc_hash_size;
        kstat_named_t   kmc_hash_lookup_depth;
        kstat_named_t   kmc_hash_rescale;
        kstat_named_t   kmc_full_magazines;
        kstat_named_t   kmc_empty_magazines;
        kstat_named_t   kmc_magazine_size;
        kstat_named_t   kmc_reap; /* number of kmem_cache_reap() calls */
        kstat_named_t   kmc_defrag; /* attempts to defrag all partial slabs */
        kstat_named_t   kmc_scan; /* attempts to defrag one partial slab */
        kstat_named_t   kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
        kstat_named_t   kmc_move_yes;
        kstat_named_t   kmc_move_no;
        kstat_named_t   kmc_move_later;
        kstat_named_t   kmc_move_dont_need;
        kstat_named_t   kmc_move_dont_know; /* obj unrecognized by client ... */
        kstat_named_t   kmc_move_hunt_found; /* ... but found in mag layer */
        kstat_named_t   kmc_move_slabs_freed; /* slabs freed by consolidator */
        kstat_named_t   kmc_move_reclaimable; /* buffers, if consolidator ran */
} kmem_cache_kstat = {
        { "buf_size",           KSTAT_DATA_UINT64 },
        { "align",              KSTAT_DATA_UINT64 },
        { "chunk_size",         KSTAT_DATA_UINT64 },
        { "slab_size",          KSTAT_DATA_UINT64 },
        { "alloc",              KSTAT_DATA_UINT64 },
        { "alloc_fail",         KSTAT_DATA_UINT64 },
        { "free",               KSTAT_DATA_UINT64 },
        { "depot_alloc",        KSTAT_DATA_UINT64 },
        { "depot_free",         KSTAT_DATA_UINT64 },
        { "depot_contention",   KSTAT_DATA_UINT64 },
        { "slab_alloc",         KSTAT_DATA_UINT64 },
        { "slab_free",          KSTAT_DATA_UINT64 },
        { "buf_constructed",    KSTAT_DATA_UINT64 },
        { "buf_avail",          KSTAT_DATA_UINT64 },
        { "buf_inuse",          KSTAT_DATA_UINT64 },
        { "buf_total",          KSTAT_DATA_UINT64 },
        { "buf_max",            KSTAT_DATA_UINT64 },
        { "slab_create",        KSTAT_DATA_UINT64 },
        { "slab_destroy",       KSTAT_DATA_UINT64 },
        { "vmem_source",        KSTAT_DATA_UINT64 },
        { "hash_size",          KSTAT_DATA_UINT64 },
        { "hash_lookup_depth",  KSTAT_DATA_UINT64 },
        { "hash_rescale",       KSTAT_DATA_UINT64 },
        { "full_magazines",     KSTAT_DATA_UINT64 },
        { "empty_magazines",    KSTAT_DATA_UINT64 },
        { "magazine_size",      KSTAT_DATA_UINT64 },
        { "reap",               KSTAT_DATA_UINT64 },
        { "defrag",             KSTAT_DATA_UINT64 },
        { "scan",               KSTAT_DATA_UINT64 },
        { "move_callbacks",     KSTAT_DATA_UINT64 },
        { "move_yes",           KSTAT_DATA_UINT64 },
        { "move_no",            KSTAT_DATA_UINT64 },
        { "move_later",         KSTAT_DATA_UINT64 },
        { "move_dont_need",     KSTAT_DATA_UINT64 },
        { "move_dont_know",     KSTAT_DATA_UINT64 },
        { "move_hunt_found",    KSTAT_DATA_UINT64 },
        { "move_slabs_freed",   KSTAT_DATA_UINT64 },
        { "move_reclaimable",   KSTAT_DATA_UINT64 },
};

static kmutex_t kmem_cache_kstat_lock;

/*
 * The default set of caches to back kmem_alloc().
 * These sizes should be reevaluated periodically.
 *
 * We want allocations that are multiples of the coherency granularity
 * (64 bytes) to be satisfied from a cache which is a multiple of 64
 * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 * the next kmem_cache_size greater than or equal to it must be a
 * multiple of 64.
 *
 * We split the table into two sections:  size <= 4k and size > 4k.  This
 * saves a lot of space and cache footprint in our cache tables.
 */
static const int kmem_alloc_sizes[] = {
        1 * 8,
        2 * 8,
        3 * 8,
        4 * 8,          5 * 8,          6 * 8,          7 * 8,
        4 * 16,         5 * 16,         6 * 16,         7 * 16,
        4 * 32,         5 * 32,         6 * 32,         7 * 32,
        4 * 64,         5 * 64,         6 * 64,         7 * 64,
        4 * 128,        5 * 128,        6 * 128,        7 * 128,
        P2ALIGN(8192 / 7, 64),
        P2ALIGN(8192 / 6, 64),
        P2ALIGN(8192 / 5, 64),
        P2ALIGN(8192 / 4, 64),
        P2ALIGN(8192 / 3, 64),
        P2ALIGN(8192 / 2, 64),
};
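
/*
 * For illustration (a sketch of how the small-allocation table is consumed,
 * simplified from the actual kmem_alloc() code path): a request of `size'
 * bytes maps to its backing cache with a single indexed lookup, rounding up
 * to the 8-byte alignment unit, e.g.
 *
 *      if (size != 0 && size <= KMEM_MAXBUF)
 *              cp = kmem_alloc_table[(size - 1) >> KMEM_ALIGN_SHIFT];
 */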
 963 
 964 static const int kmem_big_alloc_sizes[] = {
 965         2 * 4096,       3 * 4096,
 966         2 * 8192,       3 * 8192,
 967         4 * 8192,       5 * 8192,       6 * 8192,       7 * 8192,
 968         8 * 8192,       9 * 8192,       10 * 8192,      11 * 8192,
 969         12 * 8192,      13 * 8192,      14 * 8192,      15 * 8192,
 970         16 * 8192
 971 };
 972 
 973 #define KMEM_MAXBUF             4096
 974 #define KMEM_BIG_MAXBUF_32BIT   32768
 975 #define KMEM_BIG_MAXBUF         131072
 976 
 977 #define KMEM_BIG_MULTIPLE       4096    /* big_alloc_sizes must be a multiple */
 978 #define KMEM_BIG_SHIFT          12      /* lg(KMEM_BIG_MULTIPLE) */
 979 
 980 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
 981 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
 982 
 983 #define KMEM_ALLOC_TABLE_MAX    (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
 984 static size_t kmem_big_alloc_table_max = 0;     /* # of filled elements */
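
/*
 * Illustrative sketch of how the big-allocation table is consulted (cf.
 * kmem_alloc() later in this file): since every kmem_big_alloc_sizes
 * entry is a multiple of KMEM_BIG_MULTIPLE, a size in the range
 * (KMEM_MAXBUF, KMEM_BIG_MAXBUF] maps to its cache with a shift rather
 * than a search:
 *
 *	index = (size - 1) >> KMEM_BIG_SHIFT;
 *	if (index < kmem_big_alloc_table_max)
 *		cp = kmem_big_alloc_table[index];
 */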
 985 
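/*
 * Magazine types, ordered by increasing magazine size (rounds per
 * magazine).  Each row is presumably { mt_magsize, mt_align, mt_minbuf,
 * mt_maxbuf }; see kmem_magtype_t in kmem_impl.h for the authoritative
 * layout.  A cache starts with the first row whose mt_minbuf is below
 * its chunk size, so larger buffers get smaller magazines; mt_maxbuf
 * caps which caches may later be promoted to a bigger magazine type
 * when depot lock contention suggests it would help.
 */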
 986 static kmem_magtype_t kmem_magtype[] = {
 987         { 1,    8,      3200,   65536   },
 988         { 3,    16,     256,    32768   },
 989         { 7,    32,     64,     16384   },
 990         { 15,   64,     0,      8192    },
 991         { 31,   64,     0,      4096    },
 992         { 47,   64,     0,      2048    },
 993         { 63,   64,     0,      1024    },
 994         { 95,   64,     0,      512     },
 995         { 143,  64,     0,      0       },
 996 };
 997 
 998 static uint32_t kmem_reaping;
 999 static uint32_t kmem_reaping_idspace;
1000 
1001 /*
1002  * kmem tunables
1003  */
1004 clock_t kmem_reap_interval;     /* cache reaping rate [15 * HZ ticks] */
1005 int kmem_depot_contention = 3;  /* max failed tryenters per real interval */
1006 pgcnt_t kmem_reapahead = 0;     /* start reaping N pages before pageout */
1007 int kmem_panic = 1;             /* whether to panic on error */
1008 int kmem_logging = 1;           /* kmem_log_enter() override */
1009 uint32_t kmem_mtbf = 0;         /* mean time between failures [default: off] */
1010 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
1011 size_t kmem_content_log_size;   /* content log size [2% of memory] */
1012 size_t kmem_failure_log_size;   /* failure log [4 pages per CPU] */
1013 size_t kmem_slab_log_size;      /* slab create log [4 pages per CPU] */
1014 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1015 size_t kmem_lite_minsize = 0;   /* minimum buffer size for KMF_LITE */
1016 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1017 int kmem_lite_pcs = 4;          /* number of PCs to store in KMF_LITE mode */
1018 size_t kmem_maxverify;          /* maximum bytes to inspect in debug routines */
1019 size_t kmem_minfirewall;        /* hardware-enforced redzone threshold */
1020 
1021 #ifdef _LP64
1022 size_t  kmem_max_cached = KMEM_BIG_MAXBUF;      /* maximum kmem_alloc cache */
1023 #else
1024 size_t  kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1025 #endif
1026 
1027 #ifdef DEBUG
1028 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1029 #else
1030 int kmem_flags = 0;
1031 #endif
1032 int kmem_ready;
1033 
1034 static kmem_cache_t     *kmem_slab_cache;
1035 static kmem_cache_t     *kmem_bufctl_cache;
1036 static kmem_cache_t     *kmem_bufctl_audit_cache;
1037 
1038 static kmutex_t         kmem_cache_lock;        /* inter-cache linkage only */
1039 static list_t           kmem_caches;
1040 
1041 static taskq_t          *kmem_taskq;
1042 static kmutex_t         kmem_flags_lock;
1043 static vmem_t           *kmem_metadata_arena;
1044 static vmem_t           *kmem_msb_arena;        /* arena for metadata caches */
1045 static vmem_t           *kmem_cache_arena;
1046 static vmem_t           *kmem_hash_arena;
1047 static vmem_t           *kmem_log_arena;
1048 static vmem_t           *kmem_oversize_arena;
1049 static vmem_t           *kmem_va_arena;
1050 static vmem_t           *kmem_default_arena;
1051 static vmem_t           *kmem_firewall_va_arena;
1052 static vmem_t           *kmem_firewall_arena;
1053 
1054 /*
1055  * kmem slab consolidator thresholds (tunables)
1056  */
1057 size_t kmem_frag_minslabs = 101;        /* minimum total slabs */
1058 size_t kmem_frag_numer = 1;             /* free buffers (numerator) */
1059 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1060 /*
1061  * Maximum number of slabs from which to move buffers during a single
1062  * maintenance interval while the system is not low on memory.
1063  */
1064 size_t kmem_reclaim_max_slabs = 1;
1065 /*
1066  * Number of slabs to scan backwards from the end of the partial slab list
1067  * when searching for buffers to relocate.
1068  */
1069 size_t kmem_reclaim_scan_range = 12;
1070 
1071 /* consolidator knobs */
1072 boolean_t kmem_move_noreap;
1073 boolean_t kmem_move_blocked;
1074 boolean_t kmem_move_fulltilt;
1075 boolean_t kmem_move_any_partial;
1076 
1077 #ifdef  DEBUG
1078 /*
1079  * kmem consolidator debug tunables:
1080  * Ensure code coverage by occasionally running the consolidator even when the
1081  * caches are not fragmented (they may never be). These are mean times between
1082  * events, in units of cache maintenance intervals (kmem_cache_update).
1083  */
1084 uint32_t kmem_mtb_move = 60;    /* defrag 1 slab (~15min) */
1085 uint32_t kmem_mtb_reap = 1800;  /* defrag all slabs (~7.5hrs) */
1086 #endif  /* DEBUG */
1087 
1088 static kmem_cache_t     *kmem_defrag_cache;
1089 static kmem_cache_t     *kmem_move_cache;
1090 static taskq_t          *kmem_move_taskq;
1091 
1092 static void kmem_cache_scan(kmem_cache_t *);
1093 static void kmem_cache_defrag(kmem_cache_t *);
1094 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1095 
1096 
1097 kmem_log_header_t       *kmem_transaction_log;
1098 kmem_log_header_t       *kmem_content_log;
1099 kmem_log_header_t       *kmem_failure_log;
1100 kmem_log_header_t       *kmem_slab_log;
1101 
1102 static int              kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1103 
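/*
 * Record the caller's PC at the head of the buftag's bounded history
 * (bt_history), shifting the older entries down one slot.  KMF_LITE
 * caches thus keep the last kmem_lite_count callers for each buffer,
 * which debugging tools (e.g., mdb) can display.
 */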
1104 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller)                       \
1105         if ((count) > 0) {                                           \
1106                 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1107                 pc_t *_e;                                               \
1108                 /* memmove() the old entries down one notch */          \
1109                 for (_e = &_s[(count) - 1]; _e > _s; _e--)               \
1110                         *_e = *(_e - 1);                                \
1111                 *_s = (uintptr_t)(caller);                              \
1112         }
1113 
1114 #define KMERR_MODIFIED  0       /* buffer modified while on freelist */
1115 #define KMERR_REDZONE   1       /* redzone violation (write past end of buf) */
1116 #define KMERR_DUPFREE   2       /* freed a buffer twice */
1117 #define KMERR_BADADDR   3       /* freed a bad (unallocated) address */
1118 #define KMERR_BADBUFTAG 4       /* buftag corrupted */
1119 #define KMERR_BADBUFCTL 5       /* bufctl corrupted */
1120 #define KMERR_BADCACHE  6       /* freed a buffer to the wrong cache */
1121 #define KMERR_BADSIZE   7       /* alloc size != free size */
1122 #define KMERR_BADBASE   8       /* buffer base address wrong */
1123 
1124 struct {
1125         hrtime_t        kmp_timestamp;  /* timestamp of panic */
1126         int             kmp_error;      /* type of kmem error */
1127         void            *kmp_buffer;    /* buffer that induced panic */
1128         void            *kmp_realbuf;   /* real start address for buffer */
1129         kmem_cache_t    *kmp_cache;     /* buffer's cache according to client */
1130         kmem_cache_t    *kmp_realcache; /* actual cache containing buffer */
1131         kmem_slab_t     *kmp_slab;      /* slab according to kmem_findslab() */
1132         kmem_bufctl_t   *kmp_bufctl;    /* bufctl */
1133 } kmem_panic_info;
1134 
1135 
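/*
 * Pattern helpers for the debugging support below.  copy_pattern() fills
 * a buffer with a 64-bit pattern; verify_pattern() returns the address
 * of the first word that does not match the pattern (or NULL if the
 * buffer is intact); verify_and_copy_pattern() replaces the old pattern
 * with the new one word by word and, on a mismatch, restores the old
 * pattern to the words already rewritten before returning the offending
 * address.
 */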
1136 static void
1137 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1138 {
1139         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1140         uint64_t *buf = buf_arg;
1141 
1142         while (buf < bufend)
1143                 *buf++ = pattern;
1144 }
1145 
1146 static void *
1147 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1148 {
1149         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1150         uint64_t *buf;
1151 
1152         for (buf = buf_arg; buf < bufend; buf++)
1153                 if (*buf != pattern)
1154                         return (buf);
1155         return (NULL);
1156 }
1157 
1158 static void *
1159 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1160 {
1161         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1162         uint64_t *buf;
1163 
1164         for (buf = buf_arg; buf < bufend; buf++) {
1165                 if (*buf != old) {
1166                         copy_pattern(old, buf_arg,
1167                             (char *)buf - (char *)buf_arg);
1168                         return (buf);
1169                 }
1170                 *buf = new;
1171         }
1172 
1173         return (NULL);
1174 }
1175 
1176 static void
1177 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1178 {
1179         kmem_cache_t *cp;
1180 
1181         mutex_enter(&kmem_cache_lock);
1182         for (cp = list_head(&kmem_caches); cp != NULL;
1183             cp = list_next(&kmem_caches, cp))
1184                 if (tq != NULL)
1185                         (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1186                             tqflag);
1187                 else
1188                         func(cp);
1189         mutex_exit(&kmem_cache_lock);
1190 }
1191 
1192 static void
1193 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1194 {
1195         kmem_cache_t *cp;
1196 
1197         mutex_enter(&kmem_cache_lock);
1198         for (cp = list_head(&kmem_caches); cp != NULL;
1199             cp = list_next(&kmem_caches, cp)) {
1200                 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1201                         continue;
1202                 if (tq != NULL)
1203                         (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1204                             tqflag);
1205                 else
1206                         func(cp);
1207         }
1208         mutex_exit(&kmem_cache_lock);
1209 }
1210 
1211 /*
1212  * Debugging support.  Given a buffer address, find its slab.
1213  */
1214 static kmem_slab_t *
1215 kmem_findslab(kmem_cache_t *cp, void *buf)
1216 {
1217         kmem_slab_t *sp;
1218 
1219         mutex_enter(&cp->cache_lock);
1220         for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1221             sp = list_next(&cp->cache_complete_slabs, sp)) {
1222                 if (KMEM_SLAB_MEMBER(sp, buf)) {
1223                         mutex_exit(&cp->cache_lock);
1224                         return (sp);
1225                 }
1226         }
1227         for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1228             sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1229                 if (KMEM_SLAB_MEMBER(sp, buf)) {
1230                         mutex_exit(&cp->cache_lock);
1231                         return (sp);
1232                 }
1233         }
1234         mutex_exit(&cp->cache_lock);
1235 
1236         return (NULL);
1237 }
1238 
1239 static void
1240 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1241 {
1242         kmem_buftag_t *btp = NULL;
1243         kmem_bufctl_t *bcp = NULL;
1244         kmem_cache_t *cp = cparg;
1245         kmem_slab_t *sp;
1246         uint64_t *off;
1247         void *buf = bufarg;
1248 
1249         kmem_logging = 0;       /* stop logging when a bad thing happens */
1250 
1251         kmem_panic_info.kmp_timestamp = gethrtime();
1252 
1253         sp = kmem_findslab(cp, buf);
1254         if (sp == NULL) {
1255                 for (cp = list_tail(&kmem_caches); cp != NULL;
1256                     cp = list_prev(&kmem_caches, cp)) {
1257                         if ((sp = kmem_findslab(cp, buf)) != NULL)
1258                                 break;
1259                 }
1260         }
1261 
1262         if (sp == NULL) {
1263                 cp = NULL;
1264                 error = KMERR_BADADDR;
1265         } else {
1266                 if (cp != cparg)
1267                         error = KMERR_BADCACHE;
1268                 else
1269                         buf = (char *)bufarg - ((uintptr_t)bufarg -
1270                             (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1271                 if (buf != bufarg)
1272                         error = KMERR_BADBASE;
1273                 if (cp->cache_flags & KMF_BUFTAG)
1274                         btp = KMEM_BUFTAG(cp, buf);
1275                 if (cp->cache_flags & KMF_HASH) {
1276                         mutex_enter(&cp->cache_lock);
1277                         for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1278                                 if (bcp->bc_addr == buf)
1279                                         break;
1280                         mutex_exit(&cp->cache_lock);
1281                         if (bcp == NULL && btp != NULL)
1282                                 bcp = btp->bt_bufctl;
1283                         if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1284                             NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1285                             bcp->bc_addr != buf) {
1286                                 error = KMERR_BADBUFCTL;
1287                                 bcp = NULL;
1288                         }
1289                 }
1290         }
1291 
1292         kmem_panic_info.kmp_error = error;
1293         kmem_panic_info.kmp_buffer = bufarg;
1294         kmem_panic_info.kmp_realbuf = buf;
1295         kmem_panic_info.kmp_cache = cparg;
1296         kmem_panic_info.kmp_realcache = cp;
1297         kmem_panic_info.kmp_slab = sp;
1298         kmem_panic_info.kmp_bufctl = bcp;
1299 
1300         printf("kernel memory allocator: ");
1301 
1302         switch (error) {
1303 
1304         case KMERR_MODIFIED:
1305                 printf("buffer modified after being freed\n");
1306                 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1307                 if (off == NULL)        /* shouldn't happen */
1308                         off = buf;
1309                 printf("modification occurred at offset 0x%lx "
1310                     "(0x%llx replaced by 0x%llx)\n",
1311                     (uintptr_t)off - (uintptr_t)buf,
1312                     (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1313                 break;
1314 
1315         case KMERR_REDZONE:
1316                 printf("redzone violation: write past end of buffer\n");
1317                 break;
1318 
1319         case KMERR_BADADDR:
1320                 printf("invalid free: buffer not in cache\n");
1321                 break;
1322 
1323         case KMERR_DUPFREE:
1324                 printf("duplicate free: buffer freed twice\n");
1325                 break;
1326 
1327         case KMERR_BADBUFTAG:
1328                 printf("boundary tag corrupted\n");
1329                 printf("bcp ^ bxstat = %lx, should be %lx\n",
1330                     (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1331                     KMEM_BUFTAG_FREE);
1332                 break;
1333 
1334         case KMERR_BADBUFCTL:
1335                 printf("bufctl corrupted\n");
1336                 break;
1337 
1338         case KMERR_BADCACHE:
1339                 printf("buffer freed to wrong cache\n");
1340                 printf("buffer was allocated from %s,\n", cp->cache_name);
1341                 printf("caller attempting free to %s.\n", cparg->cache_name);
1342                 break;
1343 
1344         case KMERR_BADSIZE:
1345                 printf("bad free: free size (%u) != alloc size (%u)\n",
1346                     KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1347                     KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1348                 break;
1349 
1350         case KMERR_BADBASE:
1351                 printf("bad free: free address (%p) != alloc address (%p)\n",
1352                     bufarg, buf);
1353                 break;
1354         }
1355 
1356         printf("buffer=%p  bufctl=%p  cache: %s\n",
1357             bufarg, (void *)bcp, cparg->cache_name);
1358 
1359         if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1360             error != KMERR_BADBUFCTL) {
1361                 int d;
1362                 timestruc_t ts;
1363                 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1364 
1365                 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1366                 printf("previous transaction on buffer %p:\n", buf);
1367                 printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
1368                     (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1369                     (void *)sp, cp->cache_name);
1370                 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1371                         ulong_t off;
1372                         char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1373                         printf("%s+%lx\n", sym ? sym : "?", off);
1374                 }
1375         }
1376         if (kmem_panic > 0)
1377                 panic("kernel heap corruption detected");
1378         if (kmem_panic == 0)
1379                 debug_enter(NULL);
1380         kmem_logging = 1;       /* resume logging */
1381 }
1382 
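/*
 * Initialize a log of roughly logsize bytes, carved into 4 * max_ncpus
 * fixed-size chunks.  Each CPU owns one chunk at a time, so the common
 * case in kmem_log_enter() takes only the per-CPU lock; the remaining
 * chunks circulate through a free list protected by the global log
 * lock, which is taken only when a CPU's chunk fills and must be
 * exchanged.
 */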
1383 static kmem_log_header_t *
1384 kmem_log_init(size_t logsize)
1385 {
1386         kmem_log_header_t *lhp;
1387         int nchunks = 4 * max_ncpus;
1388         size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1389         int i;
1390 
1391         /*
1392          * Make sure that lhp->lh_cpu[] is nicely aligned
1393          * to prevent false sharing of cache lines.
1394          */
1395         lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1396         lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1397             NULL, NULL, VM_SLEEP);
1398         bzero(lhp, lhsize);
1399 
1400         mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1401         lhp->lh_nchunks = nchunks;
1402         lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1403         lhp->lh_base = vmem_alloc(kmem_log_arena,
1404             lhp->lh_chunksize * nchunks, VM_SLEEP);
1405         lhp->lh_free = vmem_alloc(kmem_log_arena,
1406             nchunks * sizeof (int), VM_SLEEP);
1407         bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1408 
1409         for (i = 0; i < max_ncpus; i++) {
1410                 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1411                 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1412                 clhp->clh_chunk = i;
1413         }
1414 
1415         for (i = max_ncpus; i < nchunks; i++)
1416                 lhp->lh_free[i] = i;
1417 
1418         lhp->lh_head = max_ncpus;
1419         lhp->lh_tail = 0;
1420 
1421         return (lhp);
1422 }
1423 
1424 static void *
1425 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1426 {
1427         void *logspace;
1428         kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1429 
1430         if (lhp == NULL || kmem_logging == 0 || panicstr)
1431                 return (NULL);
1432 
1433         mutex_enter(&clhp->clh_lock);
1434         clhp->clh_hits++;
1435         if (size > clhp->clh_avail) {
1436                 mutex_enter(&lhp->lh_lock);
1437                 lhp->lh_hits++;
1438                 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1439                 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1440                 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1441                 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1442                 clhp->clh_current = lhp->lh_base +
1443                     clhp->clh_chunk * lhp->lh_chunksize;
1444                 clhp->clh_avail = lhp->lh_chunksize;
1445                 if (size > lhp->lh_chunksize)
1446                         size = lhp->lh_chunksize;
1447                 mutex_exit(&lhp->lh_lock);
1448         }
1449         logspace = clhp->clh_current;
1450         clhp->clh_current += size;
1451         clhp->clh_avail -= size;
1452         bcopy(data, logspace, size);
1453         mutex_exit(&clhp->clh_lock);
1454         return (logspace);
1455 }
1456 
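/*
 * Record an audit transaction in bufctl audit record bcp: the current
 * timestamp, thread, and stack trace, plus (when lp is non-NULL) a copy
 * of the record in log lp.
 */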
1457 #define KMEM_AUDIT(lp, cp, bcp)                                         \
1458 {                                                                       \
1459         kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp);       \
1460         _bcp->bc_timestamp = gethrtime();                            \
1461         _bcp->bc_thread = curthread;                                 \
1462         _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH);    \
1463         _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp));       \
1464 }
1465 
1466 static void
1467 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1468     kmem_slab_t *sp, void *addr)
1469 {
1470         kmem_bufctl_audit_t bca;
1471 
1472         bzero(&bca, sizeof (kmem_bufctl_audit_t));
1473         bca.bc_addr = addr;
1474         bca.bc_slab = sp;
1475         bca.bc_cache = cp;
1476         KMEM_AUDIT(lp, cp, &bca);
1477 }
1478 
1479 /*
1480  * Create a new slab for cache cp.
1481  */
1482 static kmem_slab_t *
1483 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1484 {
1485         size_t slabsize = cp->cache_slabsize;
1486         size_t chunksize = cp->cache_chunksize;
1487         int cache_flags = cp->cache_flags;
1488         size_t color, chunks;
1489         char *buf, *slab;
1490         kmem_slab_t *sp;
1491         kmem_bufctl_t *bcp;
1492         vmem_t *vmp = cp->cache_arena;
1493 
1494         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1495 
1496         color = cp->cache_color + cp->cache_align;
1497         if (color > cp->cache_maxcolor)
1498                 color = cp->cache_mincolor;
1499         cp->cache_color = color;
1500 
1501         slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1502 
1503         if (slab == NULL)
1504                 goto vmem_alloc_failure;
1505 
1506         ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1507 
1508         /*
1509          * Reverify what was already checked in kmem_cache_set_move(), since the
1510          * consolidator depends (for correctness) on slabs being initialized
1511          * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1512          * clients to distinguish uninitialized memory from known objects).
1513          */
1514         ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1515         if (!(cp->cache_cflags & KMC_NOTOUCH))
1516                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1517 
1518         if (cache_flags & KMF_HASH) {
1519                 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1520                         goto slab_alloc_failure;
1521                 chunks = (slabsize - color) / chunksize;
1522         } else {
1523                 sp = KMEM_SLAB(cp, slab);
1524                 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1525         }
1526 
1527         sp->slab_cache       = cp;
1528         sp->slab_head        = NULL;
1529         sp->slab_refcnt      = 0;
1530         sp->slab_base        = buf = slab + color;
1531         sp->slab_chunks      = chunks;
1532         sp->slab_stuck_offset = (uint32_t)-1;
1533         sp->slab_later_count = 0;
1534         sp->slab_flags = 0;
1535 
1536         ASSERT(chunks > 0);
1537         while (chunks-- != 0) {
1538                 if (cache_flags & KMF_HASH) {
1539                         bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1540                         if (bcp == NULL)
1541                                 goto bufctl_alloc_failure;
1542                         if (cache_flags & KMF_AUDIT) {
1543                                 kmem_bufctl_audit_t *bcap =
1544                                     (kmem_bufctl_audit_t *)bcp;
1545                                 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1546                                 bcap->bc_cache = cp;
1547                         }
1548                         bcp->bc_addr = buf;
1549                         bcp->bc_slab = sp;
1550                 } else {
1551                         bcp = KMEM_BUFCTL(cp, buf);
1552                 }
1553                 if (cache_flags & KMF_BUFTAG) {
1554                         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1555                         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1556                         btp->bt_bufctl = bcp;
1557                         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1558                         if (cache_flags & KMF_DEADBEEF) {
1559                                 copy_pattern(KMEM_FREE_PATTERN, buf,
1560                                     cp->cache_verify);
1561                         }
1562                 }
1563                 bcp->bc_next = sp->slab_head;
1564                 sp->slab_head = bcp;
1565                 buf += chunksize;
1566         }
1567 
1568         kmem_log_event(kmem_slab_log, cp, sp, slab);
1569 
1570         return (sp);
1571 
1572 bufctl_alloc_failure:
1573 
1574         while ((bcp = sp->slab_head) != NULL) {
1575                 sp->slab_head = bcp->bc_next;
1576                 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1577         }
1578         kmem_cache_free(kmem_slab_cache, sp);
1579 
1580 slab_alloc_failure:
1581 
1582         vmem_free(vmp, slab, slabsize);
1583 
1584 vmem_alloc_failure:
1585 
1586         kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1587         atomic_inc_64(&cp->cache_alloc_fail);
1588 
1589         return (NULL);
1590 }
1591 
1592 /*
1593  * Destroy a slab.
1594  */
1595 static void
1596 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1597 {
1598         vmem_t *vmp = cp->cache_arena;
1599         void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1600 
1601         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1602         ASSERT(sp->slab_refcnt == 0);
1603 
1604         if (cp->cache_flags & KMF_HASH) {
1605                 kmem_bufctl_t *bcp;
1606                 while ((bcp = sp->slab_head) != NULL) {
1607                         sp->slab_head = bcp->bc_next;
1608                         kmem_cache_free(cp->cache_bufctl_cache, bcp);
1609                 }
1610                 kmem_cache_free(kmem_slab_cache, sp);
1611         }
1612         vmem_free(vmp, slab, cp->cache_slabsize);
1613 }
1614 
1615 static void *
1616 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1617 {
1618         kmem_bufctl_t *bcp, **hash_bucket;
1619         void *buf;
1620         boolean_t new_slab = (sp->slab_refcnt == 0);
1621 
1622         ASSERT(MUTEX_HELD(&cp->cache_lock));
1623         /*
1624          * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1625          * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1626          * slab is newly created.
1627          */
1628         ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1629             (sp == avl_first(&cp->cache_partial_slabs))));
1630         ASSERT(sp->slab_cache == cp);
1631 
1632         cp->cache_slab_alloc++;
1633         cp->cache_bufslab--;
1634         sp->slab_refcnt++;
1635 
1636         bcp = sp->slab_head;
1637         sp->slab_head = bcp->bc_next;
1638 
1639         if (cp->cache_flags & KMF_HASH) {
1640                 /*
1641                  * Add buffer to allocated-address hash table.
1642                  */
1643                 buf = bcp->bc_addr;
1644                 hash_bucket = KMEM_HASH(cp, buf);
1645                 bcp->bc_next = *hash_bucket;
1646                 *hash_bucket = bcp;
1647                 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1648                         KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1649                 }
1650         } else {
1651                 buf = KMEM_BUF(cp, bcp);
1652         }
1653 
1654         ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1655 
1656         if (sp->slab_head == NULL) {
1657                 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1658                 if (new_slab) {
1659                         ASSERT(sp->slab_chunks == 1);
1660                 } else {
1661                         ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1662                         avl_remove(&cp->cache_partial_slabs, sp);
1663                         sp->slab_later_count = 0; /* clear history */
1664                         sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1665                         sp->slab_stuck_offset = (uint32_t)-1;
1666                 }
1667                 list_insert_head(&cp->cache_complete_slabs, sp);
1668                 cp->cache_complete_slab_count++;
1669                 return (buf);
1670         }
1671 
1672         ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1673         /*
1674          * Peek to see if the magazine layer is enabled before
1675          * we prefill.  We're not holding the cpu cache lock,
1676          * so the peek could be wrong, but there's no harm in it.
1677          */
1678         if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1679             (KMEM_CPU_CACHE(cp)->cc_magsize != 0))  {
1680                 kmem_slab_prefill(cp, sp);
1681                 return (buf);
1682         }
1683 
1684         if (new_slab) {
1685                 avl_add(&cp->cache_partial_slabs, sp);
1686                 return (buf);
1687         }
1688 
1689         /*
1690          * The slab is now more allocated than it was, so the
1691          * order remains unchanged.
1692          */
1693         ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1694         return (buf);
1695 }
1696 
1697 /*
1698  * Allocate a raw (unconstructed) buffer from cp's slab layer.
1699  */
1700 static void *
1701 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1702 {
1703         kmem_slab_t *sp;
1704         void *buf;
1705         boolean_t test_destructor;
1706 
1707         mutex_enter(&cp->cache_lock);
1708         test_destructor = (cp->cache_slab_alloc == 0);
1709         sp = avl_first(&cp->cache_partial_slabs);
1710         if (sp == NULL) {
1711                 ASSERT(cp->cache_bufslab == 0);
1712 
1713                 /*
1714                  * The freelist is empty.  Create a new slab.
1715                  */
1716                 mutex_exit(&cp->cache_lock);
1717                 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1718                         return (NULL);
1719                 }
1720                 mutex_enter(&cp->cache_lock);
1721                 cp->cache_slab_create++;
1722                 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1723                         cp->cache_bufmax = cp->cache_buftotal;
1724                 cp->cache_bufslab += sp->slab_chunks;
1725         }
1726 
1727         buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1728         ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1729             (cp->cache_complete_slab_count +
1730             avl_numnodes(&cp->cache_partial_slabs) +
1731             (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1732         mutex_exit(&cp->cache_lock);
1733 
1734         if (test_destructor && cp->cache_destructor != NULL) {
1735                 /*
1736                  * On the first kmem_slab_alloc(), assert that it is valid to
1737                  * call the destructor on a newly constructed object without any
1738                  * client involvement.
1739                  */
1740                 if ((cp->cache_constructor == NULL) ||
1741                     cp->cache_constructor(buf, cp->cache_private,
1742                     kmflag) == 0) {
1743                         cp->cache_destructor(buf, cp->cache_private);
1744                 }
1745                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1746                     cp->cache_bufsize);
1747                 if (cp->cache_flags & KMF_DEADBEEF) {
1748                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1749                 }
1750         }
1751 
1752         return (buf);
1753 }
1754 
1755 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1756 
1757 /*
1758  * Free a raw (unconstructed) buffer to cp's slab layer.
1759  */
1760 static void
1761 kmem_slab_free(kmem_cache_t *cp, void *buf)
1762 {
1763         kmem_slab_t *sp;
1764         kmem_bufctl_t *bcp, **prev_bcpp;
1765 
1766         ASSERT(buf != NULL);
1767 
1768         mutex_enter(&cp->cache_lock);
1769         cp->cache_slab_free++;
1770 
1771         if (cp->cache_flags & KMF_HASH) {
1772                 /*
1773                  * Look up buffer in allocated-address hash table.
1774                  */
1775                 prev_bcpp = KMEM_HASH(cp, buf);
1776                 while ((bcp = *prev_bcpp) != NULL) {
1777                         if (bcp->bc_addr == buf) {
1778                                 *prev_bcpp = bcp->bc_next;
1779                                 sp = bcp->bc_slab;
1780                                 break;
1781                         }
1782                         cp->cache_lookup_depth++;
1783                         prev_bcpp = &bcp->bc_next;
1784                 }
1785         } else {
1786                 bcp = KMEM_BUFCTL(cp, buf);
1787                 sp = KMEM_SLAB(cp, buf);
1788         }
1789 
1790         if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1791                 mutex_exit(&cp->cache_lock);
1792                 kmem_error(KMERR_BADADDR, cp, buf);
1793                 return;
1794         }
1795 
1796         if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1797                 /*
1798                  * If this is the buffer that prevented the consolidator from
1799                  * clearing the slab, we can reset the slab flags now that the
1800                  * buffer is freed. (It makes sense to do this in
1801                  * kmem_cache_free(), where the client gives up ownership of the
1802                  * buffer, but on the hot path the test is too expensive.)
1803                  */
1804                 kmem_slab_move_yes(cp, sp, buf);
1805         }
1806 
1807         if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1808                 if (cp->cache_flags & KMF_CONTENTS)
1809                         ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1810                             kmem_log_enter(kmem_content_log, buf,
1811                             cp->cache_contents);
1812                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1813         }
1814 
1815         bcp->bc_next = sp->slab_head;
1816         sp->slab_head = bcp;
1817 
1818         cp->cache_bufslab++;
1819         ASSERT(sp->slab_refcnt >= 1);
1820 
1821         if (--sp->slab_refcnt == 0) {
1822                 /*
1823                  * There are no outstanding allocations from this slab,
1824                  * so we can reclaim the memory.
1825                  */
1826                 if (sp->slab_chunks == 1) {
1827                         list_remove(&cp->cache_complete_slabs, sp);
1828                         cp->cache_complete_slab_count--;
1829                 } else {
1830                         avl_remove(&cp->cache_partial_slabs, sp);
1831                 }
1832 
1833                 cp->cache_buftotal -= sp->slab_chunks;
1834                 cp->cache_bufslab -= sp->slab_chunks;
1835                 /*
1836                  * Defer releasing the slab to the virtual memory subsystem
1837                  * while there is a pending move callback, since we guarantee
1838                  * that buffers passed to the move callback have only been
1839                  * touched by kmem or by the client itself. Since the memory
1840                  * patterns baddcafe (uninitialized) and deadbeef (freed) both
1841                  * set at least one of the two lowest order bits, the client can
1842                  * test those bits in the move callback to determine whether or
1843                  * not it knows about the buffer (assuming that the client also
1844                  * sets one of those low order bits whenever it frees a buffer).
1845                  */
1846                 if (cp->cache_defrag == NULL ||
1847                     (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1848                     !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1849                         cp->cache_slab_destroy++;
1850                         mutex_exit(&cp->cache_lock);
1851                         kmem_slab_destroy(cp, sp);
1852                 } else {
1853                         list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1854                         /*
1855                          * Slabs are inserted at both ends of the deadlist to
1856                          * distinguish between slabs freed while move callbacks
1857                          * are pending (list head) and a slab freed while the
1858                          * lock is dropped in kmem_move_buffers() (list tail) so
1859                          * that in both cases slab_destroy() is called from the
1860                          * right context.
1861                          */
1862                         if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1863                                 list_insert_tail(deadlist, sp);
1864                         } else {
1865                                 list_insert_head(deadlist, sp);
1866                         }
1867                         cp->cache_defrag->kmd_deadcount++;
1868                         mutex_exit(&cp->cache_lock);
1869                 }
1870                 return;
1871         }
1872 
1873         if (bcp->bc_next == NULL) {
1874                 /* Transition the slab from completely allocated to partial. */
1875                 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1876                 ASSERT(sp->slab_chunks > 1);
1877                 list_remove(&cp->cache_complete_slabs, sp);
1878                 cp->cache_complete_slab_count--;
1879                 avl_add(&cp->cache_partial_slabs, sp);
1880         } else {
1881                 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1882         }
1883 
1884         ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1885             (cp->cache_complete_slab_count +
1886             avl_numnodes(&cp->cache_partial_slabs) +
1887             (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1888         mutex_exit(&cp->cache_lock);
1889 }
1890 
1891 /*
1892  * Return -1 if kmem_error() was called, 1 if the constructor fails, 0 on success.
1893  */
1894 static int
1895 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1896     caddr_t caller)
1897 {
1898         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1899         kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1900         uint32_t mtbf;
1901 
1902         if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1903                 kmem_error(KMERR_BADBUFTAG, cp, buf);
1904                 return (-1);
1905         }
1906 
1907         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1908 
1909         if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1910                 kmem_error(KMERR_BADBUFCTL, cp, buf);
1911                 return (-1);
1912         }
1913 
1914         if (cp->cache_flags & KMF_DEADBEEF) {
1915                 if (!construct && (cp->cache_flags & KMF_LITE)) {
1916                         if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1917                                 kmem_error(KMERR_MODIFIED, cp, buf);
1918                                 return (-1);
1919                         }
1920                         if (cp->cache_constructor != NULL)
1921                                 *(uint64_t *)buf = btp->bt_redzone;
1922                         else
1923                                 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1924                 } else {
1925                         construct = 1;
1926                         if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1927                             KMEM_UNINITIALIZED_PATTERN, buf,
1928                             cp->cache_verify)) {
1929                                 kmem_error(KMERR_MODIFIED, cp, buf);
1930                                 return (-1);
1931                         }
1932                 }
1933         }
1934         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1935 
1936         if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1937             gethrtime() % mtbf == 0 &&
1938             (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1939                 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1940                 if (!construct && cp->cache_destructor != NULL)
1941                         cp->cache_destructor(buf, cp->cache_private);
1942         } else {
1943                 mtbf = 0;
1944         }
1945 
1946         if (mtbf || (construct && cp->cache_constructor != NULL &&
1947             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1948                 atomic_inc_64(&cp->cache_alloc_fail);
1949                 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1950                 if (cp->cache_flags & KMF_DEADBEEF)
1951                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1952                 kmem_slab_free(cp, buf);
1953                 return (1);
1954         }
1955 
1956         if (cp->cache_flags & KMF_AUDIT) {
1957                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1958         }
1959 
1960         if ((cp->cache_flags & KMF_LITE) &&
1961             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
1962                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
1963         }
1964 
1965         return (0);
1966 }
1967 
1968 static int
1969 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
1970 {
1971         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1972         kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1973         kmem_slab_t *sp;
1974 
1975         if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
1976                 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1977                         kmem_error(KMERR_DUPFREE, cp, buf);
1978                         return (-1);
1979                 }
1980                 sp = kmem_findslab(cp, buf);
1981                 if (sp == NULL || sp->slab_cache != cp)
1982                         kmem_error(KMERR_BADADDR, cp, buf);
1983                 else
1984                         kmem_error(KMERR_REDZONE, cp, buf);
1985                 return (-1);
1986         }
1987 
1988         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1989 
1990         if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1991                 kmem_error(KMERR_BADBUFCTL, cp, buf);
1992                 return (-1);
1993         }
1994 
1995         if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
1996                 kmem_error(KMERR_REDZONE, cp, buf);
1997                 return (-1);
1998         }
1999 
2000         if (cp->cache_flags & KMF_AUDIT) {
2001                 if (cp->cache_flags & KMF_CONTENTS)
2002                         bcp->bc_contents = kmem_log_enter(kmem_content_log,
2003                             buf, cp->cache_contents);
2004                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2005         }
2006 
2007         if ((cp->cache_flags & KMF_LITE) &&
2008             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2009                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2010         }
2011 
2012         if (cp->cache_flags & KMF_DEADBEEF) {
2013                 if (cp->cache_flags & KMF_LITE)
2014                         btp->bt_redzone = *(uint64_t *)buf;
2015                 else if (cp->cache_destructor != NULL)
2016                         cp->cache_destructor(buf, cp->cache_private);
2017 
2018                 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2019         }
2020 
2021         return (0);
2022 }
2023 
2024 /*
2025  * Free each object in magazine mp to cp's slab layer, and free mp itself.
2026  */
2027 static void
2028 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2029 {
2030         int round;
2031 
2032         ASSERT(!list_link_active(&cp->cache_link) ||
2033             taskq_member(kmem_taskq, curthread));
2034 
2035         for (round = 0; round < nrounds; round++) {
2036                 void *buf = mp->mag_round[round];
2037 
2038                 if (cp->cache_flags & KMF_DEADBEEF) {
2039                         if (verify_pattern(KMEM_FREE_PATTERN, buf,
2040                             cp->cache_verify) != NULL) {
2041                                 kmem_error(KMERR_MODIFIED, cp, buf);
2042                                 continue;
2043                         }
2044                         if ((cp->cache_flags & KMF_LITE) &&
2045                             cp->cache_destructor != NULL) {
2046                                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2047                                 *(uint64_t *)buf = btp->bt_redzone;
2048                                 cp->cache_destructor(buf, cp->cache_private);
2049                                 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2050                         }
2051                 } else if (cp->cache_destructor != NULL) {
2052                         cp->cache_destructor(buf, cp->cache_private);
2053                 }
2054 
2055                 kmem_slab_free(cp, buf);
2056         }
2057         ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2058         kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2059 }
2060 
2061 /*
2062  * Allocate a magazine from the depot.
2063  */
2064 static kmem_magazine_t *
2065 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2066 {
2067         kmem_magazine_t *mp;
2068 
2069         /*
2070          * If we can't get the depot lock without contention,
2071          * update our contention count.  We use the depot
2072          * contention rate to determine whether we need to
2073          * increase the magazine size for better scalability.
2074          */
2075         if (!mutex_tryenter(&cp->cache_depot_lock)) {
2076                 mutex_enter(&cp->cache_depot_lock);
2077                 cp->cache_depot_contention++;
2078         }
2079 
2080         if ((mp = mlp->ml_list) != NULL) {
2081                 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2082                 mlp->ml_list = mp->mag_next;
2083                 if (--mlp->ml_total < mlp->ml_min)
2084                         mlp->ml_min = mlp->ml_total;
2085                 mlp->ml_alloc++;
2086         }
2087 
2088         mutex_exit(&cp->cache_depot_lock);
2089 
2090         return (mp);
2091 }
2092 
2093 /*
2094  * Free a magazine to the depot.
2095  */
2096 static void
2097 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2098 {
2099         mutex_enter(&cp->cache_depot_lock);
2100         ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2101         mp->mag_next = mlp->ml_list;
2102         mlp->ml_list = mp;
2103         mlp->ml_total++;
2104         mutex_exit(&cp->cache_depot_lock);
2105 }
2106 
2107 /*
2108  * Update the working set statistics for cp's depot.
2109  */
2110 static void
2111 kmem_depot_ws_update(kmem_cache_t *cp)
2112 {
2113         mutex_enter(&cp->cache_depot_lock);
2114         cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2115         cp->cache_full.ml_min = cp->cache_full.ml_total;
2116         cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2117         cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2118         mutex_exit(&cp->cache_depot_lock);
2119 }
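
/*
 * A worked example of the accounting above (illustrative numbers): if
 * the full-magazine list held 10 magazines at the previous update and
 * never dipped below 7 during the interval, then ml_min is 7 here, so
 * ml_reaplimit becomes 7: those 7 magazines sat unused for the entire
 * interval and are eligible for reaping by kmem_depot_ws_reap().
 */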
2120 
2121 /*
2122  * Set the working set statistics for cp's depot to zero.  (Everything is
2123  * eligible for reaping.)
2124  */
2125 static void
2126 kmem_depot_ws_zero(kmem_cache_t *cp)
2127 {
2128         mutex_enter(&cp->cache_depot_lock);
2129         cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2130         cp->cache_full.ml_min = cp->cache_full.ml_total;
2131         cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2132         cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2133         mutex_exit(&cp->cache_depot_lock);
2134 }
2135 
2136 /*
2137  * The number of bytes to reap before we call kpreempt(). The default (1MB)
2138  * causes us to preempt reaping up to hundreds of times per second. Using a
2139  * larger value (1GB) causes this to have virtually no effect.
2140  */
2141 size_t kmem_reap_preempt_bytes = 1024 * 1024;
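
/*
 * Illustrative arithmetic (assumed, typical numbers): destroying a
 * 15-round magazine of 64-byte buffers accounts 960 bytes, so with the
 * default 1MB threshold kmem_depot_ws_reap() yields the CPU roughly
 * every 1092 magazines.
 */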
2142 
2143 /*
2144  * Reap all magazines that have fallen out of the depot's working set.
2145  */
2146 static void
2147 kmem_depot_ws_reap(kmem_cache_t *cp)
2148 {
2149         size_t bytes = 0;
2150         long reap;
2151         kmem_magazine_t *mp;
2152 
2153         ASSERT(!list_link_active(&cp->cache_link) ||
2154             taskq_member(kmem_taskq, curthread));
2155 
2156         reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2157         while (reap-- &&
2158             (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL) {
2159                 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2160                 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2161                 if (bytes > kmem_reap_preempt_bytes) {
2162                         kpreempt(KPREEMPT_SYNC);
2163                         bytes = 0;
2164                 }
2165         }
2166 
2167         reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2168         while (reap-- &&
2169             (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL) {
2170                 kmem_magazine_destroy(cp, mp, 0);
2171                 bytes += cp->cache_magtype->mt_magsize * cp->cache_bufsize;
2172                 if (bytes > kmem_reap_preempt_bytes) {
2173                         kpreempt(KPREEMPT_SYNC);
2174                         bytes = 0;
2175                 }
2176         }
2177 }
2178 
2179 static void
2180 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2181 {
2182         ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2183             (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2184         ASSERT(ccp->cc_magsize > 0);
2185 
2186         ccp->cc_ploaded = ccp->cc_loaded;
2187         ccp->cc_prounds = ccp->cc_rounds;
2188         ccp->cc_loaded = mp;
2189         ccp->cc_rounds = rounds;
2190 }
2191 
2192 /*
2193  * Intercept kmem alloc/free calls during crash dump in order to avoid
2194  * changing kmem state while memory is being saved to the dump device.
2195  * Otherwise, ::kmem_verify will report "corrupt buffers".  Note that
2196  * there are no locks because only one CPU calls kmem during a crash
2197  * dump. To enable this feature, first create the associated vmem
2198  * arena with VMC_DUMPSAFE.
2199  */
2200 static void *kmem_dump_start;   /* start of pre-reserved heap */
2201 static void *kmem_dump_end;     /* end of heap area */
2202 static void *kmem_dump_curr;    /* current free heap pointer */
2203 static size_t kmem_dump_size;   /* size of heap area */
2204 
2205 /* appended to each buf created in the pre-reserved heap */
2206 typedef struct kmem_dumpctl {
2207         void    *kdc_next;      /* cache dump free list linkage */
2208 } kmem_dumpctl_t;
2209 
2210 #define KMEM_DUMPCTL(cp, buf)   \
2211         ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2212             sizeof (void *)))
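
/*
 * KMEM_DUMPCTL() locates the kmem_dumpctl_t appended to a dump buffer:
 * buf + cache_bufsize, rounded up to pointer alignment, since the
 * freelist linkage lives immediately after the client-visible bytes.
 */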
2213 
2214 /* set nonzero for a full report */
2215 uint_t kmem_dump_verbose = 0;
2216 
2217 /* stats for oversize heap */
2218 uint_t kmem_dump_oversize_allocs = 0;
2219 uint_t kmem_dump_oversize_max = 0;
2220 
2221 static void
2222 kmem_dumppr(char **pp, char *e, const char *format, ...)
2223 {
2224         char *p = *pp;
2225 
2226         if (p < e) {
2227                 int n;
2228                 va_list ap;
2229 
2230                 va_start(ap, format);
2231                 n = vsnprintf(p, e - p, format, ap);
2232                 va_end(ap);
2233                 *pp = p + n;
2234         }
2235 }
2236 
2237 /*
2238  * Called when dumpadm(1M) configures dump parameters.
2239  */
2240 void
2241 kmem_dump_init(size_t size)
2242 {
2243         /* Our caller ensures size is always set. */
2244         ASSERT3U(size, >, 0);
2245 
2246         if (kmem_dump_start != NULL)
2247                 kmem_free(kmem_dump_start, kmem_dump_size);
2248 
2249         kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2250         kmem_dump_size = size;
2251         kmem_dump_curr = kmem_dump_start;
2252         kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2253         copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2254 }
2255 
2256 /*
2257  * Set a flag on each kmem_cache_t indicating whether it is safe to use
2258  * the alternate dump memory. Called just before the panic crash dump
2259  * starts; only the calling CPU's per-cache cpu cache is updated.
2260  */
2261 void
2262 kmem_dump_begin(void)
2263 {
2264         kmem_cache_t *cp;
2265 
2266         ASSERT(panicstr != NULL);
2267 
2268         for (cp = list_head(&kmem_caches); cp != NULL;
2269             cp = list_next(&kmem_caches, cp)) {
2270                 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2271 
2272                 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2273                         cp->cache_flags |= KMF_DUMPDIVERT;
2274                         ccp->cc_flags |= KMF_DUMPDIVERT;
2275                         ccp->cc_dump_rounds = ccp->cc_rounds;
2276                         ccp->cc_dump_prounds = ccp->cc_prounds;
2277                         ccp->cc_rounds = ccp->cc_prounds = -1;
2278                 } else {
2279                         cp->cache_flags |= KMF_DUMPUNSAFE;
2280                         ccp->cc_flags |= KMF_DUMPUNSAFE;
2281                 }
2282         }
2283 }
2284 
2285 /*
2286  * Finish the dump intercept:
2287  *  - print any warnings on the console
2288  *  - return verbose information to dumpsys() in the given buffer
2289  */
2290 size_t
2291 kmem_dump_finish(char *buf, size_t size)
2292 {
2293         int percent = 0;
2294         size_t used;
2295         char *e = buf + size;
2296         char *p = buf;
2297 
2298         if (kmem_dump_curr == kmem_dump_end) {
2299                 cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
2300                     "bytes: kmem state in dump may be inconsistent",
2301                     kmem_dump_size);
2302         }
2303 
2304         if (kmem_dump_verbose == 0)
2305                 return (0);
2306 
2307         used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2308         percent = (used * 100) / kmem_dump_size;
2309 
2310         kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2311         kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2312         kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2313         kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2314             kmem_dump_oversize_allocs);
2315         kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2316             kmem_dump_oversize_max);
2317 
2318         /* return buffer size used */
2319         if (p < e)
2320                 bzero(p, e - p);
2321         return (p - buf);
2322 }
2323 
2324 /*
2325  * Allocate a constructed object from alternate dump memory.
2326  */
2327 void *
2328 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2329 {
2330         void *buf;
2331         void *curr;
2332         char *bufend;
2333 
2334         /* return a constructed object */
2335         if ((buf = cp->cache_dump.kd_freelist) != NULL) {
2336                 cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2337                 return (buf);
2338         }
2339 
2340         /* create a new constructed object */
2341         curr = kmem_dump_curr;
2342         buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2343         bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2344 
2345         /* hat layer objects cannot cross a page boundary */
2346         if (cp->cache_align < PAGESIZE) {
2347                 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2348                 if (bufend > page) {
2349                         bufend += page - (char *)buf;
2350                         buf = (void *)page;
2351                 }
2352         }
2353 
2354         /* fall back to normal alloc if reserved area is used up */
2355         if (bufend > (char *)kmem_dump_end) {
2356                 kmem_dump_curr = kmem_dump_end;
2357                 cp->cache_dump.kd_alloc_fails++;
2358                 return (NULL);
2359         }
2360 
2361         /*
2362          * Must advance curr pointer before calling a constructor that
2363          * may also allocate memory.
2364          */
2365         kmem_dump_curr = bufend;
2366 
2367         /* run constructor */
2368         if (cp->cache_constructor != NULL &&
2369             cp->cache_constructor(buf, cp->cache_private, kmflag)
2370             != 0) {
2371 #ifdef DEBUG
2372                 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2373                     cp->cache_name, (void *)cp);
2374 #endif
2375                 /* reset curr pointer iff no allocs were done */
2376                 if (kmem_dump_curr == bufend)
2377                         kmem_dump_curr = curr;
2378 
2379                 cp->cache_dump.kd_alloc_fails++;
2380                 /* fall back to normal alloc if the constructor fails */
2381                 return (NULL);
2382         }
2383 
2384         return (buf);
2385 }
2386 
2387 /*
2388  * Free a constructed object in alternate dump memory.
2389  */
2390 int
2391 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2392 {
2393         /* save constructed buffers for next time */
2394         if ((char *)buf >= (char *)kmem_dump_start &&
2395             (char *)buf < (char *)kmem_dump_end) {
2396                 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2397                 cp->cache_dump.kd_freelist = buf;
2398                 return (0);
2399         }
2400 
2401         /* just drop buffers that were allocated before dump started */
2402         if (kmem_dump_curr < kmem_dump_end)
2403                 return (0);
2404 
2405         /* fall back to normal free if reserved area is used up */
2406         return (1);
2407 }
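
/*
 * A sketch of the freelist threading used by the two functions above:
 * each object in the dump reserve is followed by a kmem_dumpctl_t
 * (located via KMEM_DUMPCTL()), whose kdc_next field singly links the
 * cache's freed objects:
 *
 *	kd_freelist --> [obj A | kdc_next] --> [obj B | kdc_next] --> NULL
 *
 * kmem_cache_free_dump() pushes constructed objects onto this list and
 * kmem_cache_alloc_dump() pops them off first, so dump-time objects are
 * recycled without consuming more of the reserve.
 */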
2408 
2409 /*
2410  * Allocate a constructed object from cache cp.
2411  */
2412 void *
2413 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2414 {
2415         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2416         kmem_magazine_t *fmp;
2417         void *buf;
2418 
2419         mutex_enter(&ccp->cc_lock);
2420         for (;;) {
2421                 /*
2422                  * If there's an object available in the current CPU's
2423                  * loaded magazine, just take it and return.
2424                  */
2425                 if (ccp->cc_rounds > 0) {
2426                         buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2427                         ccp->cc_alloc++;
2428                         mutex_exit(&ccp->cc_lock);
2429                         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2430                                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2431                                         ASSERT(!(ccp->cc_flags &
2432                                             KMF_DUMPDIVERT));
2433                                         cp->cache_dump.kd_unsafe++;
2434                                 }
2435                                 if ((ccp->cc_flags & KMF_BUFTAG) &&
2436                                     kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2437                                     caller()) != 0) {
2438                                         if (kmflag & KM_NOSLEEP)
2439                                                 return (NULL);
2440                                         mutex_enter(&ccp->cc_lock);
2441                                         continue;
2442                                 }
2443                         }
2444                         return (buf);
2445                 }
2446 
2447                 /*
2448                  * The loaded magazine is empty.  If the previously loaded
2449                  * magazine was full, exchange them and try again.
2450                  */
2451                 if (ccp->cc_prounds > 0) {
2452                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2453                         continue;
2454                 }
2455 
2456                 /*
2457                  * Return an alternate buffer at dump time to preserve
2458                  * the heap.
2459                  */
2460                 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2461                         if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2462                                 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2463                                 /* log it so that we can warn about it */
2464                                 cp->cache_dump.kd_unsafe++;
2465                         } else {
2466                                 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2467                                     NULL) {
2468                                         mutex_exit(&ccp->cc_lock);
2469                                         return (buf);
2470                                 }
2471                                 break;          /* fall back to slab layer */
2472                         }
2473                 }
2474 
2475                 /*
2476                  * If the magazine layer is disabled, break out now.
2477                  */
2478                 if (ccp->cc_magsize == 0)
2479                         break;
2480 
2481                 /*
2482                  * Try to get a full magazine from the depot.
2483                  */
2484                 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2485                 if (fmp != NULL) {
2486                         if (ccp->cc_ploaded != NULL)
2487                                 kmem_depot_free(cp, &cp->cache_empty,
2488                                     ccp->cc_ploaded);
2489                         kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2490                         continue;
2491                 }
2492 
2493                 /*
2494                  * There are no full magazines in the depot,
2495                  * so fall through to the slab layer.
2496                  */
2497                 break;
2498         }
2499         mutex_exit(&ccp->cc_lock);
2500 
2501         /*
2502          * We couldn't allocate a constructed object from the magazine layer,
2503          * so get a raw buffer from the slab layer and apply its constructor.
2504          */
2505         buf = kmem_slab_alloc(cp, kmflag);
2506 
2507         if (buf == NULL)
2508                 return (NULL);
2509 
2510         if (cp->cache_flags & KMF_BUFTAG) {
2511                 /*
2512                  * Make kmem_cache_alloc_debug() apply the constructor for us.
2513                  */
2514                 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2515                 if (rc != 0) {
2516                         if (kmflag & KM_NOSLEEP)
2517                                 return (NULL);
2518                         /*
2519                          * kmem_cache_alloc_debug() detected corruption but
2520                          * didn't panic (kmem_panic <= 0). Constructor failure
2521                          * (rc 1) is only possible with KM_NOSLEEP, which has
2522                          * already returned NULL above, so rc is -1. Try again.
2523                          */
2524                         ASSERT(rc == -1);
2525                         return (kmem_cache_alloc(cp, kmflag));
2526                 }
2527                 return (buf);
2528         }
2529 
2530         if (cp->cache_constructor != NULL &&
2531             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2532                 atomic_inc_64(&cp->cache_alloc_fail);
2533                 kmem_slab_free(cp, buf);
2534                 return (NULL);
2535         }
2536 
2537         return (buf);
2538 }
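
/*
 * For illustration, a hypothetical client of kmem_cache_alloc() and
 * kmem_cache_free() (foo_cache and foo_t are assumptions, not defined in
 * this file):
 *
 *	foo_t *fp = kmem_cache_alloc(foo_cache, KM_SLEEP);
 *	... use fp; it arrives in its constructed state ...
 *	kmem_cache_free(foo_cache, fp);		still constructed
 *
 * KM_SLEEP callers may block but never see NULL; KM_NOSLEEP callers must
 * be prepared for a NULL return.
 */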
2539 
2540 /*
2541  * The freed argument tells whether or not kmem_cache_free_debug() has already
2542  * been called so that we can avoid the duplicate free error. For example, a
2543  * buffer on a magazine has already been freed by the client but is still
2544  * constructed.
2545  */
2546 static void
2547 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2548 {
2549         if (!freed && (cp->cache_flags & KMF_BUFTAG))
2550                 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2551                         return;
2552 
2553         /*
2554          * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2555          * kmem_cache_free_debug() will have already applied the destructor.
2556          */
2557         if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2558             cp->cache_destructor != NULL) {
2559                 if (cp->cache_flags & KMF_DEADBEEF) {    /* KMF_LITE implied */
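                        /*
                         * In KMF_LITE mode only the buffer's first word is
                         * poisoned while free; restore the stashed redzone
                         * word so the destructor sees the contents the
                         * constructor left behind, then re-apply the free
                         * pattern afterwards.
                         */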
2560                         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2561                         *(uint64_t *)buf = btp->bt_redzone;
2562                         cp->cache_destructor(buf, cp->cache_private);
2563                         *(uint64_t *)buf = KMEM_FREE_PATTERN;
2564                 } else {
2565                         cp->cache_destructor(buf, cp->cache_private);
2566                 }
2567         }
2568 
2569         kmem_slab_free(cp, buf);
2570 }
2571 
2572 /*
2573  * Used when there's no room to free a buffer to the per-CPU cache.
2574  * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2575  * caller should try freeing to the per-CPU cache again.
2576  * Note that we don't directly install the magazine in the cpu cache,
2577  * since its state may have changed wildly while the lock was dropped.
2578  */
2579 static int
2580 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2581 {
2582         kmem_magazine_t *emp;
2583         kmem_magtype_t *mtp;
2584 
2585         ASSERT(MUTEX_HELD(&ccp->cc_lock));
2586         ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2587             ((uint_t)ccp->cc_rounds == -1)) &&
2588             ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2589             ((uint_t)ccp->cc_prounds == -1)));
2590 
2591         emp = kmem_depot_alloc(cp, &cp->cache_empty);
2592         if (emp != NULL) {
2593                 if (ccp->cc_ploaded != NULL)
2594                         kmem_depot_free(cp, &cp->cache_full,
2595                             ccp->cc_ploaded);
2596                 kmem_cpu_reload(ccp, emp, 0);
2597                 return (1);
2598         }
2599         /*
2600          * There are no empty magazines in the depot,
2601          * so try to allocate a new one.  We must drop all locks
2602          * across kmem_cache_alloc() because lower layers may
2603          * attempt to allocate from this cache.
2604          */
2605         mtp = cp->cache_magtype;
2606         mutex_exit(&ccp->cc_lock);
2607         emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2608         mutex_enter(&ccp->cc_lock);
2609 
2610         if (emp != NULL) {
2611                 /*
2612                  * We successfully allocated an empty magazine.
2613                  * However, we had to drop ccp->cc_lock to do it,
2614                  * so the cache's magazine size may have changed.
2615                  * If so, free the magazine and try again.
2616                  */
2617                 if (ccp->cc_magsize != mtp->mt_magsize) {
2618                         mutex_exit(&ccp->cc_lock);
2619                         kmem_cache_free(mtp->mt_cache, emp);
2620                         mutex_enter(&ccp->cc_lock);
2621                         return (1);
2622                 }
2623 
2624                 /*
2625                  * We got a magazine of the right size.  Add it to
2626                  * the depot and try the whole dance again.
2627                  */
2628                 kmem_depot_free(cp, &cp->cache_empty, emp);
2629                 return (1);
2630         }
2631 
2632         /*
2633          * We couldn't allocate an empty magazine,
2634          * so fall through to the slab layer.
2635          */
2636         return (0);
2637 }
2638 
2639 /*
2640  * Free a constructed object to cache cp.
2641  */
2642 void
2643 kmem_cache_free(kmem_cache_t *cp, void *buf)
2644 {
2645         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2646 
2647         /*
2648          * The client must not free either of the buffers passed to the move
2649          * callback function.
2650          */
2651         ASSERT(cp->cache_defrag == NULL ||
2652             cp->cache_defrag->kmd_thread != curthread ||
2653             (buf != cp->cache_defrag->kmd_from_buf &&
2654             buf != cp->cache_defrag->kmd_to_buf));
2655 
2656         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2657                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2658                         ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2659                         /* log it so that we can warn about it */
2660                         cp->cache_dump.kd_unsafe++;
2661                 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2662                         return;
2663                 }
2664                 if (ccp->cc_flags & KMF_BUFTAG) {
2665                         if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2666                                 return;
2667                 }
2668         }
2669 
2670         mutex_enter(&ccp->cc_lock);
2671         /*
2672          * Any changes to this logic should be reflected in kmem_slab_prefill()
2673          */
2674         for (;;) {
2675                 /*
2676                  * If there's a slot available in the current CPU's
2677                  * loaded magazine, just put the object there and return.
2678                  */
2679                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2680                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2681                         ccp->cc_free++;
2682                         mutex_exit(&ccp->cc_lock);
2683                         return;
2684                 }
2685 
2686                 /*
2687                  * The loaded magazine is full.  If the previously loaded
2688                  * magazine was empty, exchange them and try again.
2689                  */
2690                 if (ccp->cc_prounds == 0) {
2691                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2692                         continue;
2693                 }
2694 
2695                 /*
2696                  * If the magazine layer is disabled, break out now.
2697                  */
2698                 if (ccp->cc_magsize == 0)
2699                         break;
2700 
2701                 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2702                         /*
2703                          * We couldn't free our constructed object to the
2704                          * magazine layer, so apply its destructor and free it
2705                          * to the slab layer.
2706                          */
2707                         break;
2708                 }
2709         }
2710         mutex_exit(&ccp->cc_lock);
2711         kmem_slab_free_constructed(cp, buf, B_TRUE);
2712 }
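
/*
 * A worked example of the magazine exchange above (illustrative values):
 * with cc_magsize == 3, cc_rounds == 3 (loaded magazine full) and
 * cc_prounds == 0 (previous magazine empty), the free cannot use the
 * loaded magazine, so kmem_cpu_reload() swaps the two; cc_rounds becomes
 * 0 and the free lands in the newly loaded empty magazine.  The alloc
 * path in kmem_cache_alloc() performs the mirror-image swap when the
 * loaded magazine is empty and the previous one is full.
 */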
2713 
2714 static void
2715 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2716 {
2717         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2718         int cache_flags = cp->cache_flags;
2719 
2720         kmem_bufctl_t *next, *head;
2721         size_t nbufs;
2722 
2723         /*
2724          * Completely allocate the newly created slab and put the pre-allocated
2725          * buffers in magazines. Any of the buffers that cannot be put in
2726          * magazines must be returned to the slab.
2727          */
2728         ASSERT(MUTEX_HELD(&cp->cache_lock));
2729         ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2730         ASSERT(cp->cache_constructor == NULL);
2731         ASSERT(sp->slab_cache == cp);
2732         ASSERT(sp->slab_refcnt == 1);
2733         ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2734         ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2735 
2736         head = sp->slab_head;
2737         nbufs = (sp->slab_chunks - sp->slab_refcnt);
2738         sp->slab_head = NULL;
2739         sp->slab_refcnt += nbufs;
2740         cp->cache_bufslab -= nbufs;
2741         cp->cache_slab_alloc += nbufs;
2742         list_insert_head(&cp->cache_complete_slabs, sp);
2743         cp->cache_complete_slab_count++;
2744         mutex_exit(&cp->cache_lock);
2745         mutex_enter(&ccp->cc_lock);
2746 
2747         while (head != NULL) {
2748                 void *buf = KMEM_BUF(cp, head);
2749                 /*
2750                  * If there's a slot available in the current CPU's
2751                  * loaded magazine, just put the object there and
2752                  * continue.
2753                  */
2754                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2755                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2756                             buf;
2757                         ccp->cc_free++;
2758                         nbufs--;
2759                         head = head->bc_next;
2760                         continue;
2761                 }
2762 
2763                 /*
2764                  * The loaded magazine is full.  If the previously
2765                  * loaded magazine was empty, exchange them and try
2766                  * again.
2767                  */
2768                 if (ccp->cc_prounds == 0) {
2769                         kmem_cpu_reload(ccp, ccp->cc_ploaded,
2770                             ccp->cc_prounds);
2771                         continue;
2772                 }
2773 
2774                 /*
2775                  * If the magazine layer is disabled, break out now.
2776                  */
2777                 if (ccp->cc_magsize == 0) {
2778                         break;
2779                 }
2781 
2782                 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2783                         break;
2784         }
2785         mutex_exit(&ccp->cc_lock);
2786         if (nbufs != 0) {
2787                 ASSERT(head != NULL);
2788 
2789                 /*
2790                  * If there was a failure, return the remaining objects to
2791                  * the slab.
2792                  */
2793                 while (head != NULL) {
2794                         ASSERT(nbufs != 0);
2795                         next = head->bc_next;
2796                         head->bc_next = NULL;
2797                         kmem_slab_free(cp, KMEM_BUF(cp, head));
2798                         head = next;
2799                         nbufs--;
2800                 }
2801         }
2802         ASSERT(head == NULL);
2803         ASSERT(nbufs == 0);
2804         mutex_enter(&cp->cache_lock);
2805 }
2806 
2807 void *
2808 kmem_zalloc(size_t size, int kmflag)
2809 {
2810         size_t index;
2811         void *buf;
2812 
2813         if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2814                 kmem_cache_t *cp = kmem_alloc_table[index];
2815                 buf = kmem_cache_alloc(cp, kmflag);
2816                 if (buf != NULL) {
2817                         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2818                                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2819                                 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2820                                 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2821 
2822                                 if (cp->cache_flags & KMF_LITE) {
2823                                         KMEM_BUFTAG_LITE_ENTER(btp,
2824                                             kmem_lite_count, caller());
2825                                 }
2826                         }
2827                         bzero(buf, size);
2828                 }
2829         } else {
2830                 buf = kmem_alloc(size, kmflag);
2831                 if (buf != NULL)
2832                         bzero(buf, size);
2833         }
2834         return (buf);
2835 }
2836 
2837 void *
2838 kmem_alloc(size_t size, int kmflag)
2839 {
2840         size_t index;
2841         kmem_cache_t *cp;
2842         void *buf;
2843 
2844         if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2845                 cp = kmem_alloc_table[index];
2846                 /* fall through to kmem_cache_alloc() */
2847 
2848         } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2849             kmem_big_alloc_table_max) {
2850                 cp = kmem_big_alloc_table[index];
2851                 /* fall through to kmem_cache_alloc() */
2852 
2853         } else {
2854                 if (size == 0)
2855                         return (NULL);
2856 
2857                 buf = vmem_alloc(kmem_oversize_arena, size,
2858                     kmflag & KM_VMFLAGS);
2859                 if (buf == NULL)
2860                         kmem_log_event(kmem_failure_log, NULL, NULL,
2861                             (void *)size);
2862                 else if (KMEM_DUMP(kmem_slab_cache)) {
2863                         /* stats for dump intercept */
2864                         kmem_dump_oversize_allocs++;
2865                         if (size > kmem_dump_oversize_max)
2866                                 kmem_dump_oversize_max = size;
2867                 }
2868                 return (buf);
2869         }
2870 
2871         buf = kmem_cache_alloc(cp, kmflag);
2872         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2873                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2874                 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2875                 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2876 
2877                 if (cp->cache_flags & KMF_LITE) {
2878                         KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2879                 }
2880         }
2881         return (buf);
2882 }
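
/*
 * A worked example of the size-class lookup above, assuming the usual
 * KMEM_ALIGN of 8 (KMEM_ALIGN_SHIFT == 3): for size == 24,
 * index == (24 - 1) >> 3 == 2, so kmem_alloc_table[2] is the cache that
 * serves all 17..24-byte requests.  Sizes beyond both tables fall
 * through to the oversize arena.
 */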
2883 
2884 void
2885 kmem_free(void *buf, size_t size)
2886 {
2887         size_t index;
2888         kmem_cache_t *cp;
2889 
2890         if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2891                 cp = kmem_alloc_table[index];
2892                 /* fall through to kmem_cache_free() */
2893 
2894         } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2895             kmem_big_alloc_table_max) {
2896                 cp = kmem_big_alloc_table[index];
2897                 /* fall through to kmem_cache_free() */
2898 
2899         } else {
2900                 EQUIV(buf == NULL, size == 0);
2901                 if (buf == NULL && size == 0)
2902                         return;
2903                 vmem_free(kmem_oversize_arena, buf, size);
2904                 return;
2905         }
2906 
2907         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2908                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2909                 uint32_t *ip = (uint32_t *)btp;
2910                 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2911                         if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2912                                 kmem_error(KMERR_DUPFREE, cp, buf);
2913                                 return;
2914                         }
2915                         if (KMEM_SIZE_VALID(ip[1])) {
2916                                 ip[0] = KMEM_SIZE_ENCODE(size);
2917                                 kmem_error(KMERR_BADSIZE, cp, buf);
2918                         } else {
2919                                 kmem_error(KMERR_REDZONE, cp, buf);
2920                         }
2921                         return;
2922                 }
2923                 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2924                         kmem_error(KMERR_REDZONE, cp, buf);
2925                         return;
2926                 }
2927                 btp->bt_redzone = KMEM_REDZONE_PATTERN;
2928                 if (cp->cache_flags & KMF_LITE) {
2929                         KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
2930                             caller());
2931                 }
2932         }
2933         kmem_cache_free(cp, buf);
2934 }
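
/*
 * Note that kmem_free() trusts the caller-supplied size; the allocator
 * does not record it.  For illustration (len is hypothetical):
 *
 *	buf = kmem_alloc(len, KM_SLEEP);
 *	...
 *	kmem_free(buf, len);		must be the same len
 *
 * A mismatched size trips the KMERR_BADSIZE/KMERR_REDZONE checks above
 * when buftags are enabled, and frees the buffer to the wrong size-class
 * cache when they are not.
 */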
2935 
2936 void *
2937 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2938 {
2939         size_t realsize = size + vmp->vm_quantum;
2940         void *addr;
2941 
2942         /*
2943          * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2944          * vm_quantum will cause integer wraparound.  Check for this, and
2945          * blow off the firewall page in this case.  Note that such a
2946          * giant allocation (the entire kernel address space) can never
2947          * be satisfied, so it will either fail immediately (VM_NOSLEEP)
2948          * or sleep forever (VM_SLEEP).  Thus, there is no need for a
2949          * corresponding check in kmem_firewall_va_free().
2950          */
2951         if (realsize < size)
2952                 realsize = size;
2953 
2954         /*
2955          * While boot still owns resource management, make sure that this
2956          * redzone virtual address allocation is properly accounted for in
2957  * OBP's "virtual-memory" "available" lists because we're
2958          * effectively claiming them for a red zone.  If we don't do this,
2959          * the available lists become too fragmented and too large for the
2960          * current boot/kernel memory list interface.
2961          */
2962         addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
2963 
2964         if (addr != NULL && kvseg.s_base == NULL && realsize != size)
2965                 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
2966 
2967         return (addr);
2968 }
2969 
2970 void
2971 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
2972 {
2973         ASSERT((kvseg.s_base == NULL ?
2974             va_to_pfn((char *)addr + size) :
2975             hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
2976 
2977         vmem_free(vmp, addr, size + vmp->vm_quantum);
2978 }
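
/*
 * A sketch of the resulting layout (illustrative, one allocation):
 *
 *	|<------------- size ------------->|<- vm_quantum ->|
 *	+----------------------------------+----------------+
 *	|           usable buffer          |    red zone    |
 *	+----------------------------------+----------------+
 *
 * The trailing quantum is claimed but never mapped (kmem_firewall_va_free()
 * asserts that its PFN is invalid), so a sequential overrun faults
 * immediately instead of silently corrupting the next allocation.
 */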
2979 
2980 /*
2981  * Try to allocate at least `size' bytes of memory without sleeping or
2982  * panicking. Return actual allocated size in `asize'. If allocation failed,
2983  * try final allocation with sleep or panic allowed.
2984  */
2985 void *
2986 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
2987 {
2988         void *p;
2989 
2990         *asize = P2ROUNDUP(size, KMEM_ALIGN);
2991         do {
2992                 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
2993                 if (p != NULL)
2994                         return (p);
2995                 *asize += KMEM_ALIGN;
2996         } while (*asize <= PAGESIZE);
2997 
2998         *asize = P2ROUNDUP(size, KMEM_ALIGN);
2999         return (kmem_alloc(*asize, kmflag));
3000 }
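
/*
 * For illustration (names are hypothetical), a caller that can make use
 * of any nearby size:
 *
 *	size_t asize;
 *	void *buf = kmem_alloc_tryhard(len, &asize, KM_SLEEP);
 *	... buf holds asize >= len bytes; free with kmem_free(buf, asize) ...
 */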
3001 
3002 /*
3003  * Reclaim all unused memory from a cache.
3004  */
3005 static void
3006 kmem_cache_reap(kmem_cache_t *cp)
3007 {
3008         ASSERT(taskq_member(kmem_taskq, curthread));
3009         cp->cache_reap++;
3010 
3011         /*
3012          * Ask the cache's owner to free some memory if possible.
3013          * The idea is to handle things like the inode cache, which
3014          * typically sits on a bunch of memory that it doesn't truly
3015          * *need*.  Reclaim policy is entirely up to the owner; this
3016          * callback is just an advisory plea for help.
3017          */
3018         if (cp->cache_reclaim != NULL) {
3019                 long delta;
3020 
3021                 /*
3022                  * Reclaimed memory should be reapable (not included in the
3023                  * depot's working set).
3024                  */
3025                 delta = cp->cache_full.ml_total;
3026                 cp->cache_reclaim(cp->cache_private);
3027                 delta = cp->cache_full.ml_total - delta;
3028                 if (delta > 0) {
3029                         mutex_enter(&cp->cache_depot_lock);
3030                         cp->cache_full.ml_reaplimit += delta;
3031                         cp->cache_full.ml_min += delta;
3032                         mutex_exit(&cp->cache_depot_lock);
3033                 }
3034         }
3035 
3036         kmem_depot_ws_reap(cp);
3037 
3038         if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3039                 kmem_cache_defrag(cp);
3040         }
3041 }
3042 
3043 static void
3044 kmem_reap_timeout(void *flag_arg)
3045 {
3046         uint32_t *flag = (uint32_t *)flag_arg;
3047 
3048         ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3049         *flag = 0;
3050 }
3051 
3052 static void
3053 kmem_reap_done(void *flag)
3054 {
3055         if (!callout_init_done) {
3056                 /* can't schedule a timeout at this point */
3057                 kmem_reap_timeout(flag);
3058         } else {
3059                 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3060         }
3061 }
3062 
3063 static void
3064 kmem_reap_start(void *flag)
3065 {
3066         ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3067 
3068         if (flag == &kmem_reaping) {
3069                 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3070                 /*
3071                  * If we have segkp under the heap, reap the segkp cache.
3072                  */
3073                 if (segkp_fromheap)
3074                         segkp_cache_free();
3075         } else
3076                 kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq,
3077                     TQ_NOSLEEP);
3078 
3079         /*
3080          * We use taskq_dispatch() to schedule a timeout to clear
3081          * the flag so that kmem_reap() becomes self-throttling:
3082          * we won't reap again until the current reap completes *and*
3083          * at least kmem_reap_interval ticks have elapsed.
3084          */
3085         if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3086                 kmem_reap_done(flag);
3087 }
3088 
3089 static void
3090 kmem_reap_common(void *flag_arg)
3091 {
3092         uint32_t *flag = (uint32_t *)flag_arg;
3093 
3094         if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3095             atomic_cas_32(flag, 0, 1) != 0)
3096                 return;
3097 
3098         /*
3099          * It may not be kosher to do memory allocation when a reap is called
3100          * (for example, if vmem_populate() is in the call chain).  So we
3101          * start the reap going with a TQ_NOALLOC dispatch.  If the dispatch
3102          * fails, we reset the flag, and the next reap will try again.
3103          */
3104         if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3105                 *flag = 0;
3106 }
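
/*
 * A sketch of the throttle implemented above; the flag word moves
 *
 *	0 -> 1	atomic_cas_32() in kmem_reap_common()
 *	1	reap runs (kmem_reap_start() via kmem_taskq)
 *	1	kmem_reap_done() schedules a kmem_reap_interval timeout
 *	1 -> 0	kmem_reap_timeout() clears the flag
 *
 * so at most one reap per flag is in flight, and a new one cannot begin
 * until the current reap completes and the interval expires.
 */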
3107 
3108 /*
3109  * Reclaim all unused memory from all caches.  Called from the VM system
3110  * when memory gets tight.
3111  */
3112 void
3113 kmem_reap(void)
3114 {
3115         kmem_reap_common(&kmem_reaping);
3116 }
3117 
3118 /*
3119  * Reclaim all unused memory from identifier arenas, called when a vmem
3120  * arena not backed by memory is exhausted.  Since reaping memory-backed caches
3121  * cannot help with identifier exhaustion, we avoid both a large amount of
3122  * work and unwanted side-effects from reclaim callbacks.
3123  */
3124 void
3125 kmem_reap_idspace(void)
3126 {
3127         kmem_reap_common(&kmem_reaping_idspace);
3128 }
3129 
3130 /*
3131  * Purge all magazines from a cache and set its magazine limit to zero.
3132  * All calls are serialized by the kmem_taskq lock, except for the final
3133  * call from kmem_cache_destroy().
3134  */
3135 static void
3136 kmem_cache_magazine_purge(kmem_cache_t *cp)
3137 {
3138         kmem_cpu_cache_t *ccp;
3139         kmem_magazine_t *mp, *pmp;
3140         int rounds, prounds, cpu_seqid;
3141 
3142         ASSERT(!list_link_active(&cp->cache_link) ||
3143             taskq_member(kmem_taskq, curthread));
3144         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3145 
3146         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3147                 ccp = &cp->cache_cpu[cpu_seqid];
3148 
3149                 mutex_enter(&ccp->cc_lock);
3150                 mp = ccp->cc_loaded;
3151                 pmp = ccp->cc_ploaded;
3152                 rounds = ccp->cc_rounds;
3153                 prounds = ccp->cc_prounds;
3154                 ccp->cc_loaded = NULL;
3155                 ccp->cc_ploaded = NULL;
3156                 ccp->cc_rounds = -1;
3157                 ccp->cc_prounds = -1;
3158                 ccp->cc_magsize = 0;
3159                 mutex_exit(&ccp->cc_lock);
3160 
3161                 if (mp)
3162                         kmem_magazine_destroy(cp, mp, rounds);
3163                 if (pmp)
3164                         kmem_magazine_destroy(cp, pmp, prounds);
3165         }
3166 
3167         kmem_depot_ws_zero(cp);
3168         kmem_depot_ws_reap(cp);
3169 }
3170 
3171 /*
3172  * Enable per-cpu magazines on a cache.
3173  */
3174 static void
3175 kmem_cache_magazine_enable(kmem_cache_t *cp)
3176 {
3177         int cpu_seqid;
3178 
3179         if (cp->cache_flags & KMF_NOMAGAZINE)
3180                 return;
3181 
3182         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3183                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3184                 mutex_enter(&ccp->cc_lock);
3185                 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3186                 mutex_exit(&ccp->cc_lock);
3187         }
3188 
3189 }
3190 
3191 /*
3192  * Allow our caller to determine if there are running reaps.
3193  *
3194  * This call is very conservative and may return B_TRUE even when no
3195  * reap is actually in progress. If it returns B_FALSE, then reaping
3196  * activity is definitely inactive.
3197  */
3198 boolean_t
3199 kmem_cache_reap_active(void)
3200 {
3201         return (!taskq_empty(kmem_taskq));
3202 }
3203 
3204 /*
3205  * Reap (almost) everything soon.
3206  *
3207  * Note: this does not wait for the reap-tasks to complete. Caller
3208  * should use kmem_cache_reap_active() (above) and/or moderation to
3209  * avoid scheduling too many reap-tasks.
3210  */
3211 void
3212 kmem_cache_reap_soon(kmem_cache_t *cp)
3213 {
3214         ASSERT(list_link_active(&cp->cache_link));
3215 
3216         kmem_depot_ws_zero(cp);
3217 
3218         (void) taskq_dispatch(kmem_taskq,
3219             (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3220 }
3221 
3222 /*
3223  * Recompute a cache's magazine size.  The trade-off is that larger magazines
3224  * provide a higher transfer rate with the depot, while smaller magazines
3225  * reduce memory consumption.  Magazine resizing is an expensive operation;
3226  * it should not be done frequently.
3227  *
3228  * Changes to the magazine size are serialized by the kmem_taskq lock.
3229  *
3230  * Note: at present this only grows the magazine size.  It might be useful
3231  * to allow shrinkage too.
3232  */
3233 static void
3234 kmem_cache_magazine_resize(kmem_cache_t *cp)
3235 {
3236         kmem_magtype_t *mtp = cp->cache_magtype;
3237 
3238         ASSERT(taskq_member(kmem_taskq, curthread));
3239 
3240         if (cp->cache_chunksize < mtp->mt_maxbuf) {
3241                 kmem_cache_magazine_purge(cp);
3242                 mutex_enter(&cp->cache_depot_lock);
3243                 cp->cache_magtype = ++mtp;
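                /*
                 * Adding INT_MAX to the previous contention reading makes
                 * the signed delta computed by kmem_cache_update() hugely
                 * negative, so another resize cannot trigger before the
                 * next update interval re-primes the counter.
                 */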
3244                 cp->cache_depot_contention_prev =
3245                     cp->cache_depot_contention + INT_MAX;
3246                 mutex_exit(&cp->cache_depot_lock);
3247                 kmem_cache_magazine_enable(cp);
3248         }
3249 }
3250 
3251 /*
3252  * Rescale a cache's hash table, so that the table size is roughly the
3253  * cache size.  We want the average lookup time to be extremely small.
3254  */
3255 static void
3256 kmem_hash_rescale(kmem_cache_t *cp)
3257 {
3258         kmem_bufctl_t **old_table, **new_table, *bcp;
3259         size_t old_size, new_size, h;
3260 
3261         ASSERT(taskq_member(kmem_taskq, curthread));
3262 
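        /*
         * For example (illustrative numbers): with cache_buftotal == 1000,
         * highbit(3 * 1000 + 4) == 12, so new_size == 1 << 10 == 1024 --
         * roughly one hash bucket per buffer.
         */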
3263         new_size = MAX(KMEM_HASH_INITIAL,
3264             1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3265         old_size = cp->cache_hash_mask + 1;
3266 
3267         if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3268                 return;
3269 
3270         new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3271             VM_NOSLEEP);
3272         if (new_table == NULL)
3273                 return;
3274         bzero(new_table, new_size * sizeof (void *));
3275 
3276         mutex_enter(&cp->cache_lock);
3277 
3278         old_size = cp->cache_hash_mask + 1;
3279         old_table = cp->cache_hash_table;
3280 
3281         cp->cache_hash_mask = new_size - 1;
3282         cp->cache_hash_table = new_table;
3283         cp->cache_rescale++;
3284 
3285         for (h = 0; h < old_size; h++) {
3286                 bcp = old_table[h];
3287                 while (bcp != NULL) {
3288                         void *addr = bcp->bc_addr;
3289                         kmem_bufctl_t *next_bcp = bcp->bc_next;
3290                         kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3291                         bcp->bc_next = *hash_bucket;
3292                         *hash_bucket = bcp;
3293                         bcp = next_bcp;
3294                 }
3295         }
3296 
3297         mutex_exit(&cp->cache_lock);
3298 
3299         vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3300 }
3301 
3302 /*
3303  * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3304  * update, magazine resizing, and slab consolidation.
3305  */
3306 static void
3307 kmem_cache_update(kmem_cache_t *cp)
3308 {
3309         int need_hash_rescale = 0;
3310         int need_magazine_resize = 0;
3311 
3312         ASSERT(MUTEX_HELD(&kmem_cache_lock));
3313 
3314         /*
3315          * If the cache has become much larger or smaller than its hash table,
3316          * fire off a request to rescale the hash table.
3317          */
3318         mutex_enter(&cp->cache_lock);
3319 
3320         if ((cp->cache_flags & KMF_HASH) &&
3321             (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3322             (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3323             cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3324                 need_hash_rescale = 1;
3325 
3326         mutex_exit(&cp->cache_lock);
3327 
3328         /*
3329          * Update the depot working set statistics.
3330          */
3331         kmem_depot_ws_update(cp);
3332 
3333         /*
3334          * If there's a lot of contention in the depot,
3335          * increase the magazine size.
3336          */
3337         mutex_enter(&cp->cache_depot_lock);
3338 
3339         if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3340             (int)(cp->cache_depot_contention -
3341             cp->cache_depot_contention_prev) > kmem_depot_contention)
3342                 need_magazine_resize = 1;
3343 
3344         cp->cache_depot_contention_prev = cp->cache_depot_contention;
3345 
3346         mutex_exit(&cp->cache_depot_lock);
3347 
3348         if (need_hash_rescale)
3349                 (void) taskq_dispatch(kmem_taskq,
3350                     (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3351 
3352         if (need_magazine_resize)
3353                 (void) taskq_dispatch(kmem_taskq,
3354                     (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3355 
3356         if (cp->cache_defrag != NULL)
3357                 (void) taskq_dispatch(kmem_taskq,
3358                     (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3359 }
3360 
3361 static void kmem_update(void *);
3362 
3363 static void
3364 kmem_update_timeout(void *dummy)
3365 {
3366         (void) timeout(kmem_update, dummy, kmem_reap_interval);
3367 }
3368 
3369 static void
3370 kmem_update(void *dummy)
3371 {
3372         kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3373 
3374         /*
3375          * We use taskq_dispatch() to reschedule the timeout so that
3376          * kmem_update() becomes self-throttling: it won't schedule
3377          * new tasks until all previous tasks have completed.
3378          */
3379         if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
3380                 kmem_update_timeout(NULL);
3381 }
3382 
3383 static int
3384 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3385 {
3386         struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3387         kmem_cache_t *cp = ksp->ks_private;
3388         uint64_t cpu_buf_avail;
3389         uint64_t buf_avail = 0;
3390         int cpu_seqid;
3391         long reap;
3392 
3393         ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3394 
3395         if (rw == KSTAT_WRITE)
3396                 return (EACCES);
3397 
3398         mutex_enter(&cp->cache_lock);
3399 
3400         kmcp->kmc_alloc_fail.value.ui64              = cp->cache_alloc_fail;
3401         kmcp->kmc_alloc.value.ui64           = cp->cache_slab_alloc;
3402         kmcp->kmc_free.value.ui64            = cp->cache_slab_free;
3403         kmcp->kmc_slab_alloc.value.ui64              = cp->cache_slab_alloc;
3404         kmcp->kmc_slab_free.value.ui64               = cp->cache_slab_free;
3405 
3406         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3407                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3408 
3409                 mutex_enter(&ccp->cc_lock);
3410 
3411                 cpu_buf_avail = 0;
3412                 if (ccp->cc_rounds > 0)
3413                         cpu_buf_avail += ccp->cc_rounds;
3414                 if (ccp->cc_prounds > 0)
3415                         cpu_buf_avail += ccp->cc_prounds;
3416 
3417                 kmcp->kmc_alloc.value.ui64   += ccp->cc_alloc;
3418                 kmcp->kmc_free.value.ui64    += ccp->cc_free;
3419                 buf_avail                       += cpu_buf_avail;
3420 
3421                 mutex_exit(&ccp->cc_lock);
3422         }
3423 
3424         mutex_enter(&cp->cache_depot_lock);
3425 
3426         kmcp->kmc_depot_alloc.value.ui64     = cp->cache_full.ml_alloc;
3427         kmcp->kmc_depot_free.value.ui64              = cp->cache_empty.ml_alloc;
3428         kmcp->kmc_depot_contention.value.ui64        = cp->cache_depot_contention;
3429         kmcp->kmc_full_magazines.value.ui64  = cp->cache_full.ml_total;
3430         kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3431         kmcp->kmc_magazine_size.value.ui64   =
3432             (cp->cache_flags & KMF_NOMAGAZINE) ?
3433             0 : cp->cache_magtype->mt_magsize;
3434 
3435         kmcp->kmc_alloc.value.ui64           += cp->cache_full.ml_alloc;
3436         kmcp->kmc_free.value.ui64            += cp->cache_empty.ml_alloc;
3437         buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3438 
3439         reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3440         reap = MIN(reap, cp->cache_full.ml_total);
3441 
3442         mutex_exit(&cp->cache_depot_lock);
3443 
3444         kmcp->kmc_buf_size.value.ui64        = cp->cache_bufsize;
3445         kmcp->kmc_align.value.ui64   = cp->cache_align;
3446         kmcp->kmc_chunk_size.value.ui64      = cp->cache_chunksize;
3447         kmcp->kmc_slab_size.value.ui64       = cp->cache_slabsize;
3448         kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3449         buf_avail += cp->cache_bufslab;
3450         kmcp->kmc_buf_avail.value.ui64       = buf_avail;
3451         kmcp->kmc_buf_inuse.value.ui64       = cp->cache_buftotal - buf_avail;
3452         kmcp->kmc_buf_total.value.ui64       = cp->cache_buftotal;
3453         kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3454         kmcp->kmc_slab_create.value.ui64     = cp->cache_slab_create;
3455         kmcp->kmc_slab_destroy.value.ui64    = cp->cache_slab_destroy;
3456         kmcp->kmc_hash_size.value.ui64       = (cp->cache_flags & KMF_HASH) ?
3457             cp->cache_hash_mask + 1 : 0;
3458         kmcp->kmc_hash_lookup_depth.value.ui64       = cp->cache_lookup_depth;
3459         kmcp->kmc_hash_rescale.value.ui64    = cp->cache_rescale;
3460         kmcp->kmc_vmem_source.value.ui64     = cp->cache_arena->vm_id;
3461         kmcp->kmc_reap.value.ui64    = cp->cache_reap;
3462 
3463         if (cp->cache_defrag == NULL) {
3464                 kmcp->kmc_move_callbacks.value.ui64  = 0;
3465                 kmcp->kmc_move_yes.value.ui64                = 0;
3466                 kmcp->kmc_move_no.value.ui64         = 0;
3467                 kmcp->kmc_move_later.value.ui64              = 0;
3468                 kmcp->kmc_move_dont_need.value.ui64  = 0;
3469                 kmcp->kmc_move_dont_know.value.ui64  = 0;
3470                 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3471                 kmcp->kmc_move_slabs_freed.value.ui64        = 0;
3472                 kmcp->kmc_defrag.value.ui64          = 0;
3473                 kmcp->kmc_scan.value.ui64            = 0;
3474                 kmcp->kmc_move_reclaimable.value.ui64        = 0;
3475         } else {
3476                 int64_t reclaimable;
3477 
3478                 kmem_defrag_t *kd = cp->cache_defrag;
3479                 kmcp->kmc_move_callbacks.value.ui64  = kd->kmd_callbacks;
3480                 kmcp->kmc_move_yes.value.ui64                = kd->kmd_yes;
3481                 kmcp->kmc_move_no.value.ui64         = kd->kmd_no;
3482                 kmcp->kmc_move_later.value.ui64              = kd->kmd_later;
3483                 kmcp->kmc_move_dont_need.value.ui64  = kd->kmd_dont_need;
3484                 kmcp->kmc_move_dont_know.value.ui64  = kd->kmd_dont_know;
3485                 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3486                 kmcp->kmc_move_slabs_freed.value.ui64        = kd->kmd_slabs_freed;
3487                 kmcp->kmc_defrag.value.ui64          = kd->kmd_defrags;
3488                 kmcp->kmc_scan.value.ui64            = kd->kmd_scans;
3489 
3490                 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3491                 reclaimable = MAX(reclaimable, 0);
3492                 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3493                 kmcp->kmc_move_reclaimable.value.ui64        = reclaimable;
3494         }
3495 
3496         mutex_exit(&cp->cache_lock);
3497         return (0);
3498 }
3499 
3500 /*
3501  * Return a named statistic about a particular cache.
3502  * This shouldn't be called very often, so it's currently designed for
3503  * simplicity (leverages existing kstat support) rather than efficiency.
3504  */
3505 uint64_t
3506 kmem_cache_stat(kmem_cache_t *cp, char *name)
3507 {
3508         int i;
3509         kstat_t *ksp = cp->cache_kstat;
3510         kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3511         uint64_t value = 0;
3512 
3513         if (ksp != NULL) {
3514                 mutex_enter(&kmem_cache_kstat_lock);
3515                 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3516                 for (i = 0; i < ksp->ks_ndata; i++) {
3517                         if (strcmp(knp[i].name, name) == 0) {
3518                                 value = knp[i].value.ui64;
3519                                 break;
3520                         }
3521                 }
3522                 mutex_exit(&kmem_cache_kstat_lock);
3523         }
3524         return (value);
3525 }
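
/*
 * For illustration, a caller curious about a cache's utilization might
 * do the following (the names mirror the kstat fields updated above):
 *
 *	uint64_t inuse = kmem_cache_stat(cp, "buf_inuse");
 *	uint64_t total = kmem_cache_stat(cp, "buf_total");
 */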
3526 
3527 /*
3528  * Return an estimate of currently available kernel heap memory.
3529  * On 32-bit systems, physical memory may exceed virtual memory,
3530  * so we just truncate the result at 1GB.
3531  */
3532 size_t
3533 kmem_avail(void)
3534 {
3535         spgcnt_t rmem = availrmem - tune.t_minarmem;
3536         spgcnt_t fmem = freemem - minfree;
3537 
3538         return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3539             1 << (30 - PAGESHIFT))));
3540 }
3541 
3542 /*
3543  * Return the maximum amount of memory that is (in theory) allocatable
3544  * from the heap. This may be used as an estimate only since there
3545  * is no guarantee this space will still be available when an allocation
3546  * request is made, nor that the space can be allocated in one big request
3547  * due to kernel heap fragmentation.
3548  */
3549 size_t
3550 kmem_maxavail(void)
3551 {
3552         spgcnt_t pmem = availrmem - tune.t_minarmem;
3553         spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3554 
3555         return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3556 }
3557 
3558 /*
3559  * Indicate whether memory-intensive kmem debugging is enabled.
3560  */
3561 int
3562 kmem_debugging(void)
3563 {
3564         return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3565 }
3566 
3567 /* binning function, sorts finely at the two extremes */
3568 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift)                          \
3569         ((((sp)->slab_refcnt <= (binshift)) ||                            \
3570             (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift)))   \
3571             ? -(sp)->slab_refcnt                                     \
3572             : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3573 
3574 /*
3575  * Minimizing the number of partial slabs on the freelist minimizes
3576  * fragmentation (the ratio of unused buffers held by the slab layer). There are
3577  * two ways to get a slab off of the freelist: 1) free all the buffers on the
3578  * slab, and 2) allocate all the buffers on the slab. It follows that we want
3579  * the most-used slabs at the front of the list where they have the best chance
3580  * of being completely allocated, and the least-used slabs at a safe distance
3581  * from the front to improve the odds that the few remaining buffers will all be
3582  * freed before another allocation can tie up the slab. For that reason a slab
3583  * with a higher slab_refcnt sorts less than a slab with a lower
3584  * slab_refcnt.
3585  *
3586  * However, if a slab has at least one buffer that is deemed unfreeable, we
3587  * would rather have that slab at the front of the list regardless of
3588  * slab_refcnt, since even one unfreeable buffer makes the entire slab
3589  * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3590  * callback, the slab is marked unfreeable for as long as it remains on the
3591  * freelist.
3592  */
3593 static int
3594 kmem_partial_slab_cmp(const void *p0, const void *p1)
3595 {
3596         const kmem_cache_t *cp;
3597         const kmem_slab_t *s0 = p0;
3598         const kmem_slab_t *s1 = p1;
3599         int w0, w1;
3600         size_t binshift;
3601 
3602         ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3603         ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3604         ASSERT(s0->slab_cache == s1->slab_cache);
3605         cp = s1->slab_cache;
3606         ASSERT(MUTEX_HELD(&cp->cache_lock));
3607         binshift = cp->cache_partial_binshift;
3608 
3609         /* weight of first slab */
3610         w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3611         if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3612                 w0 -= cp->cache_maxchunks;
3613         }
3614 
3615         /* weight of second slab */
3616         w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3617         if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3618                 w1 -= cp->cache_maxchunks;
3619         }
3620 
3621         if (w0 < w1)
3622                 return (-1);
3623         if (w0 > w1)
3624                 return (1);
3625 
3626         /* compare pointer values */
3627         if ((uintptr_t)s0 < (uintptr_t)s1)
3628                 return (-1);
3629         if ((uintptr_t)s0 > (uintptr_t)s1)
3630                 return (1);
3631 
3632         return (0);
3633 }
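
/*
 * A worked example of the weighting above (illustrative values): with
 * binshift == 3 and slab_chunks == 32,
 *
 *	slab_refcnt == 30 (nearly full):    weight == -30
 *	slab_refcnt == 20 (middling):       weight == -(3 + (20 >> 3)) == -5
 *	slab_refcnt == 16 (middling):       weight == -(3 + (16 >> 3)) == -5
 *	slab_refcnt ==  2 (nearly empty):   weight == -2
 *
 * Lower weights sort first, so the nearly full slab leads the freelist,
 * middling slabs collapse into coarse bins ordered by address, and the
 * nearly empty slab trails, where it has the best chance of emptying
 * completely.
 */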
3634 
3635 /*
3636  * It must be valid to call the destructor (if any) on a newly created object.
3637  * That is, the constructor (if any) must leave the object in a valid state for
3638  * the destructor.
3639  */
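
/*
 * For illustration (foo_t and its members are hypothetical), a pair that
 * satisfies the rule above -- the destructor must be safe to run on any
 * object the constructor has returned, even one that was never used:
 *
 *	static int
 *	foo_construct(void *buf, void *arg, int kmflag)
 *	{
 *		foo_t *fp = buf;
 *		mutex_init(&fp->foo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		fp->foo_refcnt = 0;
 *		return (0);
 *	}
 *
 *	static void
 *	foo_destruct(void *buf, void *arg)
 *	{
 *		foo_t *fp = buf;
 *		ASSERT(fp->foo_refcnt == 0);
 *		mutex_destroy(&fp->foo_lock);
 *	}
 */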
3640 kmem_cache_t *
3641 kmem_cache_create(
3642         char *name,             /* descriptive name for this cache */
3643         size_t bufsize,         /* size of the objects it manages */
3644         size_t align,           /* required object alignment */
3645         int (*constructor)(void *, void *, int), /* object constructor */
3646         void (*destructor)(void *, void *),     /* object destructor */
3647         void (*reclaim)(void *), /* memory reclaim callback */
3648         void *private,          /* pass-thru arg for constr/destr/reclaim */
3649         vmem_t *vmp,            /* vmem source for slab allocation */
3650         int cflags)             /* cache creation flags */
3651 {
3652         int cpu_seqid;
3653         size_t chunksize;
3654         kmem_cache_t *cp;
3655         kmem_magtype_t *mtp;
3656         size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3657 
3658 #ifdef  DEBUG
3659         /*
3660          * Cache names should conform to the rules for valid C identifiers
3661          */
3662         if (!strident_valid(name)) {
3663                 cmn_err(CE_CONT,
3664                     "kmem_cache_create: '%s' is an invalid cache name\n"
3665                     "cache names must conform to the rules for "
3666                     "C identifiers\n", name);
3667         }
3668 #endif  /* DEBUG */
3669 
3670         if (vmp == NULL)
3671                 vmp = kmem_default_arena;
3672 
3673         /*
3674          * If this kmem cache has an identifier vmem arena as its source, mark
3675          * it such to allow kmem_reap_idspace().
3676          */
3677         ASSERT(!(cflags & KMC_IDENTIFIER));   /* consumer should not set this */
3678         if (vmp->vm_cflags & VMC_IDENTIFIER)
3679                 cflags |= KMC_IDENTIFIER;
3680 
3681         /*
3682          * Get a kmem_cache structure.  We arrange that cp->cache_cpu[]
3683          * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3684          * false sharing of per-CPU data.
3685          */
3686         cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3687             P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3688         bzero(cp, csize);
3689         list_link_init(&cp->cache_link);
3690 
3691         if (align == 0)
3692                 align = KMEM_ALIGN;
3693 
3694         /*
3695          * If we're not at least KMEM_ALIGN aligned, we can't use free
3696          * memory to hold bufctl information (because we can't safely
3697          * perform word loads and stores on it).
3698          */
3699         if (align < KMEM_ALIGN)
3700                 cflags |= KMC_NOTOUCH;
3701 
3702         if (!ISP2(align) || align > vmp->vm_quantum)
3703                 panic("kmem_cache_create: bad alignment %lu", align);
3704 
3705         mutex_enter(&kmem_flags_lock);
3706         if (kmem_flags & KMF_RANDOMIZE)
3707                 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3708                     KMF_RANDOMIZE;
3709         cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3710         mutex_exit(&kmem_flags_lock);
3711 
3712         /*
3713          * Make sure all the various flags are reasonable.
3714          */
3715         ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3716 
3717         if (cp->cache_flags & KMF_LITE) {
3718                 if (bufsize >= kmem_lite_minsize &&
3719                     align <= kmem_lite_maxalign &&
3720                     P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3721                         cp->cache_flags |= KMF_BUFTAG;
3722                         cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3723                 } else {
3724                         cp->cache_flags &= ~KMF_DEBUG;
3725                 }
3726         }
3727 
3728         if (cp->cache_flags & KMF_DEADBEEF)
3729                 cp->cache_flags |= KMF_REDZONE;
3730 
3731         if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3732                 cp->cache_flags |= KMF_NOMAGAZINE;
3733 
3734         if (cflags & KMC_NODEBUG)
3735                 cp->cache_flags &= ~KMF_DEBUG;
3736 
3737         if (cflags & KMC_NOTOUCH)
3738                 cp->cache_flags &= ~KMF_TOUCH;
3739 
3740         if (cflags & KMC_PREFILL)
3741                 cp->cache_flags |= KMF_PREFILL;
3742 
3743         if (cflags & KMC_NOHASH)
3744                 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3745 
3746         if (cflags & KMC_NOMAGAZINE)
3747                 cp->cache_flags |= KMF_NOMAGAZINE;
3748 
3749         if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3750                 cp->cache_flags |= KMF_REDZONE;
3751 
3752         if (!(cp->cache_flags & KMF_AUDIT))
3753                 cp->cache_flags &= ~KMF_CONTENTS;
3754 
3755         if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3756             !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3757                 cp->cache_flags |= KMF_FIREWALL;
3758 
3759         if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3760                 cp->cache_flags &= ~KMF_FIREWALL;
3761 
3762         if (cp->cache_flags & KMF_FIREWALL) {
3763                 cp->cache_flags &= ~KMF_BUFTAG;
3764                 cp->cache_flags |= KMF_NOMAGAZINE;
3765                 ASSERT(vmp == kmem_default_arena);
3766                 vmp = kmem_firewall_arena;
3767         }
3768 
3769         /*
3770          * Set cache properties.
3771          */
3772         (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3773         strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3774         cp->cache_bufsize = bufsize;
3775         cp->cache_align = align;
3776         cp->cache_constructor = constructor;
3777         cp->cache_destructor = destructor;
3778         cp->cache_reclaim = reclaim;
3779         cp->cache_private = private;
3780         cp->cache_arena = vmp;
3781         cp->cache_cflags = cflags;
3782 
3783         /*
3784          * Determine the chunk size.
3785          */
3786         chunksize = bufsize;
3787 
3788         if (align >= KMEM_ALIGN) {
3789                 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3790                 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3791         }
3792 
3793         if (cp->cache_flags & KMF_BUFTAG) {
3794                 cp->cache_bufctl = chunksize;
3795                 cp->cache_buftag = chunksize;
3796                 if (cp->cache_flags & KMF_LITE)
3797                         chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3798                 else
3799                         chunksize += sizeof (kmem_buftag_t);
3800         }
3801 
3802         if (cp->cache_flags & KMF_DEADBEEF) {
3803                 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3804                 if (cp->cache_flags & KMF_LITE)
3805                         cp->cache_verify = sizeof (uint64_t);
3806         }
3807 
3808         cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3809 
3810         cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
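             /*
              * A hedged example of the layout computed above (assuming
              * KMEM_ALIGN == 8): a 30-byte buffer with default alignment
              * yields chunksize 32 with cache_bufctl at offset 24, so the
              * last word of a free buffer doubles as freelist linkage.  If
              * KMF_BUFTAG is set, cache_bufctl and cache_buftag instead
              * point just past the buffer at offset 32, and the chunk grows
              * by the buftag size before the final round-up to align.
              */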
3811 
3812         /*
3813          * Now that we know the chunk size, determine the optimal slab size.
3814          */
3815         if (vmp == kmem_firewall_arena) {
3816                 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3817                 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3818                 cp->cache_maxcolor = cp->cache_mincolor;
3819                 cp->cache_flags |= KMF_HASH;
3820                 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3821         } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3822             !(cp->cache_flags & KMF_AUDIT) &&
3823             chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3824                 cp->cache_slabsize = vmp->vm_quantum;
3825                 cp->cache_mincolor = 0;
3826                 cp->cache_maxcolor =
3827                     (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3828                 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3829                 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3830         } else {
3831                 size_t chunks, bestfit, waste, slabsize;
3832                 size_t minwaste = LONG_MAX;
3833 
3834                 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3835                         slabsize = P2ROUNDUP(chunksize * chunks,
3836                             vmp->vm_quantum);
3837                         chunks = slabsize / chunksize;
3838                         waste = (slabsize % chunksize) / chunks;
3839                         if (waste < minwaste) {
3840                                 minwaste = waste;
3841                                 bestfit = slabsize;
3842                         }
3843                 }
3844                 if (cflags & KMC_QCACHE)
3845                         bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3846                 cp->cache_slabsize = bestfit;
3847                 cp->cache_mincolor = 0;
3848                 cp->cache_maxcolor = bestfit % chunksize;
3849                 cp->cache_flags |= KMF_HASH;
3850         }
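             /*
              * A worked example of the best-fit search above (assuming a 4K
              * vm_quantum and KMEM_VOID_FRACTION == 8) for chunksize 3000:
              * one chunk rounds up to a 4K slab that wastes 1096 bytes per
              * chunk; two chunks round up to 8K, still 1096 bytes per chunk;
              * three chunks round up to a 12K slab that actually holds four,
              * cutting the amortized waste to (12288 % 3000) / 4 == 72 bytes
              * per chunk.  No larger slab does better before the loop
              * terminates, so bestfit is 12288.
              */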
3851 
3852         cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3853         cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3854 
3855         /*
3856          * Disallowing prefill when either the DEBUG or HASH flag is set or when
3857          * there is a constructor avoids some tricky issues with debug setup
3858          * that may be revisited later. We cannot allow prefill in a
3859          * metadata cache because of potential recursion.
3860          */
3861         if (vmp == kmem_msb_arena ||
3862             cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3863             cp->cache_constructor != NULL)
3864                 cp->cache_flags &= ~KMF_PREFILL;
3865 
3866         if (cp->cache_flags & KMF_HASH) {
3867                 ASSERT(!(cflags & KMC_NOHASH));
3868                 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3869                     kmem_bufctl_audit_cache : kmem_bufctl_cache;
3870         }
3871 
3872         if (cp->cache_maxcolor >= vmp->vm_quantum)
3873                 cp->cache_maxcolor = vmp->vm_quantum - 1;
3874 
3875         cp->cache_color = cp->cache_mincolor;
3876 
3877         /*
3878          * Initialize the rest of the slab layer.
3879          */
3880         mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3881 
3882         avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3883             sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3884         /* LINTED: E_TRUE_LOGICAL_EXPR */
3885         ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3886         /* reuse partial slab AVL linkage for complete slab list linkage */
3887         list_create(&cp->cache_complete_slabs,
3888             sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3889 
3890         if (cp->cache_flags & KMF_HASH) {
3891                 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3892                     KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3893                 bzero(cp->cache_hash_table,
3894                     KMEM_HASH_INITIAL * sizeof (void *));
3895                 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
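                     /* hashing shifts out the low-order ~log2(chunksize) address bits */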
3896                 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3897         }
3898 
3899         /*
3900          * Initialize the depot.
3901          */
3902         mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3903 
3904         for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3905                 continue;
3906 
3907         cp->cache_magtype = mtp;
3908 
3909         /*
3910          * Initialize the CPU layer.
3911          */
3912         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3913                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3914                 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3915                 ccp->cc_flags = cp->cache_flags;
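                     /* a rounds count of -1 denotes the absence of a magazine */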
3916                 ccp->cc_rounds = -1;
3917                 ccp->cc_prounds = -1;
3918         }
3919 
3920         /*
3921          * Create the cache's kstats.
3922          */
3923         if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3924             "kmem_cache", KSTAT_TYPE_NAMED,
3925             sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3926             KSTAT_FLAG_VIRTUAL)) != NULL) {
3927                 cp->cache_kstat->ks_data = &kmem_cache_kstat;
3928                 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3929                 cp->cache_kstat->ks_private = cp;
3930                 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3931                 kstat_install(cp->cache_kstat);
3932         }
3933 
3934         /*
3935          * Add the cache to the global list.  This makes it visible
3936          * to kmem_update(), so the cache must be ready for business.
3937          */
3938         mutex_enter(&kmem_cache_lock);
3939         list_insert_tail(&kmem_caches, cp);
3940         mutex_exit(&kmem_cache_lock);
3941 
3942         if (kmem_ready)
3943                 kmem_cache_magazine_enable(cp);
3944 
3945         return (cp);
3946 }
3947 
3948 static int
3949 kmem_move_cmp(const void *buf, const void *p)
3950 {
3951         const kmem_move_t *kmm = p;
3952         uintptr_t v1 = (uintptr_t)buf;
3953         uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
3954         return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
3955 }
3956 
3957 static void
3958 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
3959 {
3960         kmd->kmd_reclaim_numer = 1;
3961 }
3962 
3963 /*
3964  * Initially, when choosing candidate slabs for buffers to move, we want to be
3965  * very selective and take only slabs that are less than
3966  * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
3967  * slabs, then we raise the allocation ceiling incrementally. The reclaim
3968  * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
3969  * longer fragmented.
3970  */
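     /*
      * For example (assuming KMEM_VOID_FRACTION == 8): the ceiling starts at
      * 1/8 allocated; each raise below bumps kmd_reclaim_numer by one, moving
      * the ceiling through 2/8, 3/8, ... up to at most 7/8, and each lowering
      * step walks it back down toward 1/8.
      */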
3971 static void
3972 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
3973 {
3974         if (direction > 0) {
3975                 /* make it easier to find a candidate slab */
3976                 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
3977                         kmd->kmd_reclaim_numer++;
3978                 }
3979         } else {
3980                 /* be more selective */
3981                 if (kmd->kmd_reclaim_numer > 1) {
3982                         kmd->kmd_reclaim_numer--;
3983                 }
3984         }
3985 }
3986 
3987 void
3988 kmem_cache_set_move(kmem_cache_t *cp,
3989     kmem_cbrc_t (*move)(void *, void *, size_t, void *))
3990 {
3991         kmem_defrag_t *defrag;
3992 
3993         ASSERT(move != NULL);
3994         /*
3995          * The consolidator does not support NOTOUCH caches because kmem cannot
3996          * initialize their slabs with the 0xbaddcafe memory pattern, which sets
3997          * a low order bit usable by clients to distinguish uninitialized memory
3998          * from known objects (see kmem_slab_create).
3999          */
4000         ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4001         ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4002 
4003         /*
4004          * We should not be holding anyone's cache lock when calling
4005          * kmem_cache_alloc(), so allocate in all cases before acquiring the
4006          * lock.
4007          */
4008         defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4009 
4010         mutex_enter(&cp->cache_lock);
4011 
4012         if (KMEM_IS_MOVABLE(cp)) {
4013                 if (cp->cache_move == NULL) {
4014                         ASSERT(cp->cache_slab_alloc == 0);
4015 
4016                         cp->cache_defrag = defrag;
4017                         defrag = NULL; /* nothing to free */
4018                         bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4019                         avl_create(&cp->cache_defrag->kmd_moves_pending,
4020                             kmem_move_cmp, sizeof (kmem_move_t),
4021                             offsetof(kmem_move_t, kmm_entry));
4022                         /* LINTED: E_TRUE_LOGICAL_EXPR */
4023                         ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4024                         /* reuse the slab's AVL linkage for deadlist linkage */
4025                         list_create(&cp->cache_defrag->kmd_deadlist,
4026                             sizeof (kmem_slab_t),
4027                             offsetof(kmem_slab_t, slab_link));
4028                         kmem_reset_reclaim_threshold(cp->cache_defrag);
4029                 }
4030                 cp->cache_move = move;
4031         }
4032 
4033         mutex_exit(&cp->cache_lock);
4034 
4035         if (defrag != NULL) {
4036                 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4037         }
4038 }
4039 
4040 void
4041 kmem_cache_destroy(kmem_cache_t *cp)
4042 {
4043         int cpu_seqid;
4044 
4045         /*
4046          * Remove the cache from the global cache list so that no one else
4047          * can schedule tasks on its behalf, wait for any pending tasks to
4048          * complete, purge the cache, and then destroy it.
4049          */
4050         mutex_enter(&kmem_cache_lock);
4051         list_remove(&kmem_caches, cp);
4052         mutex_exit(&kmem_cache_lock);
4053 
4054         if (kmem_taskq != NULL)
4055                 taskq_wait(kmem_taskq);
4056 
4057         if (kmem_move_taskq != NULL && cp->cache_defrag != NULL)
4058                 taskq_wait(kmem_move_taskq);
4059 
4060         kmem_cache_magazine_purge(cp);
4061 
4062         mutex_enter(&cp->cache_lock);
4063         if (cp->cache_buftotal != 0)
4064                 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4065                     cp->cache_name, (void *)cp);
4066         if (cp->cache_defrag != NULL) {
4067                 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4068                 list_destroy(&cp->cache_defrag->kmd_deadlist);
4069                 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4070                 cp->cache_defrag = NULL;
4071         }
4072         /*
4073          * The cache is now dead.  There should be no further activity.  We
4074          * enforce this by setting land mines in the constructor, destructor,
4075          * reclaim, and move routines that induce a kernel text fault if
4076          * invoked.
4077          */
4078         cp->cache_constructor = (int (*)(void *, void *, int))1;
4079         cp->cache_destructor = (void (*)(void *, void *))2;
4080         cp->cache_reclaim = (void (*)(void *))3;
4081         cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4082         mutex_exit(&cp->cache_lock);
4083 
4084         kstat_delete(cp->cache_kstat);
4085 
4086         if (cp->cache_hash_table != NULL)
4087                 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4088                     (cp->cache_hash_mask + 1) * sizeof (void *));
4089 
4090         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4091                 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4092 
4093         mutex_destroy(&cp->cache_depot_lock);
4094         mutex_destroy(&cp->cache_lock);
4095 
4096         vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4097 }
4098 
4099 /*ARGSUSED*/
4100 static int
4101 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4102 {
4103         ASSERT(MUTEX_HELD(&cpu_lock));
4104         if (what == CPU_UNCONFIG) {
4105                 kmem_cache_applyall(kmem_cache_magazine_purge,
4106                     kmem_taskq, TQ_SLEEP);
4107                 kmem_cache_applyall(kmem_cache_magazine_enable,
4108                     kmem_taskq, TQ_SLEEP);
4109         }
4110         return (0);
4111 }
4112 
4113 static void
4114 kmem_alloc_caches_create(const int *array, size_t count,
4115     kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4116 {
4117         char name[KMEM_CACHE_NAMELEN + 1];
4118         size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4119         size_t size = table_unit;
4120         int i;
4121 
4122         for (i = 0; i < count; i++) {
4123                 size_t cache_size = array[i];
4124                 size_t align = KMEM_ALIGN;
4125                 kmem_cache_t *cp;
4126 
4127                 /* if the table has an entry for maxbuf, we're done */
4128                 if (size > maxbuf)
4129                         break;
4130 
4131                 /* cache size must be a multiple of the table unit */
4132                 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4133 
4134                 /*
4135                  * If they allocate a multiple of the coherency granularity,
4136                  * they get a coherency-granularity-aligned address.
4137                  */
4138                 if (IS_P2ALIGNED(cache_size, 64))
4139                         align = 64;
4140                 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4141                         align = PAGESIZE;
4142                 (void) snprintf(name, sizeof (name),
4143                     "kmem_alloc_%lu", cache_size);
4144                 cp = kmem_cache_create(name, cache_size, align,
4145                     NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4146 
4147                 while (size <= cache_size) {
4148                         alloc_table[(size - 1) >> shift] = cp;
4149                         size += table_unit;
4150                 }
4151         }
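             /*
              * A hedged example of the table fill above (assuming shift == 3,
              * so table_unit == 8, and cache sizes 8, 16, and 32): the size-8
              * cache fills alloc_table[0], the size-16 cache fills
              * alloc_table[1], and the size-32 cache fills alloc_table[2] and
              * alloc_table[3].  A later 20-byte request indexes
              * (20 - 1) >> 3 == 2 and is thus served by the smallest
              * sufficiently large cache, kmem_alloc_32.
              */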
4152 
4153         ASSERT(size > maxbuf);               /* i.e. maxbuf <= max(cache_size) */
4154 }
4155 
4156 static void
4157 kmem_cache_init(int pass, int use_large_pages)
4158 {
4159         int i;
4160         size_t maxbuf;
4161         kmem_magtype_t *mtp;
4162 
4163         for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4164                 char name[KMEM_CACHE_NAMELEN + 1];
4165 
4166                 mtp = &kmem_magtype[i];
4167                 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4168                 mtp->mt_cache = kmem_cache_create(name,
4169                     (mtp->mt_magsize + 1) * sizeof (void *),
4170                     mtp->mt_align, NULL, NULL, NULL, NULL,
4171                     kmem_msb_arena, KMC_NOHASH);
4172         }
4173 
4174         kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4175             sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4176             kmem_msb_arena, KMC_NOHASH);
4177 
4178         kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4179             sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4180             kmem_msb_arena, KMC_NOHASH);
4181 
4182         kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4183             sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4184             kmem_msb_arena, KMC_NOHASH);
4185 
4186         if (pass == 2) {
4187                 kmem_va_arena = vmem_create("kmem_va",
4188                     NULL, 0, PAGESIZE,
4189                     vmem_alloc, vmem_free, heap_arena,
4190                     8 * PAGESIZE, VM_SLEEP);
4191 
4192                 if (use_large_pages) {
4193                         kmem_default_arena = vmem_xcreate("kmem_default",
4194                             NULL, 0, PAGESIZE,
4195                             segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4196                             0, VMC_DUMPSAFE | VM_SLEEP);
4197                 } else {
4198                         kmem_default_arena = vmem_create("kmem_default",
4199                             NULL, 0, PAGESIZE,
4200                             segkmem_alloc, segkmem_free, kmem_va_arena,
4201                             0, VMC_DUMPSAFE | VM_SLEEP);
4202                 }
4203 
4204                 /* Figure out what our maximum cache size is */
4205                 maxbuf = kmem_max_cached;
4206                 if (maxbuf <= KMEM_MAXBUF) {
4207                         maxbuf = 0;
4208                         kmem_max_cached = KMEM_MAXBUF;
4209                 } else {
4210                         size_t size = 0;
4211                         size_t max =
4212                             sizeof (kmem_big_alloc_sizes) / sizeof (int);
4213                         /*
4214                          * Round maxbuf up to an existing cache size.  If maxbuf
4215                          * is larger than the largest cache, we truncate it to
4216                          * the largest cache's size.
4217                          */
4218                         for (i = 0; i < max; i++) {
4219                                 size = kmem_big_alloc_sizes[i];
4220                                 if (maxbuf <= size)
4221                                         break;
4222                         }
4223                         kmem_max_cached = maxbuf = size;
4224                 }
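                     /*
                      * For example (with hypothetical table entries): if
                      * kmem_big_alloc_sizes contained ... 16K, 24K ..., a
                      * kmem_max_cached of 20000 would round up to 24K, while
                      * a value beyond the last entry would be truncated to
                      * the largest cache size.
                      */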
4225 
4226                 /*
4227                  * The big alloc table may not be completely overwritten, so
4228                  * we clear out any stale cache pointers from the first pass.
4229                  */
4230                 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4231         } else {
4232                 /*
4233                  * During the first pass, the kmem_alloc_* caches
4234                  * are treated as metadata.
4235                  */
4236                 kmem_default_arena = kmem_msb_arena;
4237                 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4238         }
4239 
4240         /*
4241          * Set up the default caches to back kmem_alloc()
4242          */
4243         kmem_alloc_caches_create(
4244             kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4245             kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4246 
4247         kmem_alloc_caches_create(
4248             kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4249             kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4250 
4251         kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4252 }
4253 
4254 void
4255 kmem_init(void)
4256 {
4257         kmem_cache_t *cp;
4258         int old_kmem_flags = kmem_flags;
4259         int use_large_pages = 0;
4260         size_t maxverify, minfirewall;
4261 
4262         kstat_init();
4263 
4264         /*
4265          * Don't do firewalled allocations if the heap is less than 1TB
4266          * (i.e. on a 32-bit kernel), because the resulting VM_NEXTFIT
4267          * allocations would create too much fragmentation in a small
4268          * heap.
4269          */
4270 #if defined(_LP64)
4271         maxverify = minfirewall = PAGESIZE / 2;
4272 #else
4273         maxverify = minfirewall = ULONG_MAX;
4274 #endif
4275 
4276         /* LINTED */
4277         ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4278 
4279         list_create(&kmem_caches, sizeof (kmem_cache_t),
4280             offsetof(kmem_cache_t, cache_link));
4281 
4282         kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4283             vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4284             VM_SLEEP | VMC_NO_QCACHE);
4285 
4286         kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4287             PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4288             VMC_DUMPSAFE | VM_SLEEP);
4289 
4290         kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4291             segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4292 
4293         kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4294             segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4295 
4296         kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4297             segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4298 
4299         kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4300             NULL, 0, PAGESIZE,
4301             kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4302             0, VM_SLEEP);
4303 
4304         kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4305             segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4306             VMC_DUMPSAFE | VM_SLEEP);
4307 
4308         /* temporary oversize arena for mod_read_system_file */
4309         kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4310             segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4311 
4312         kmem_reap_interval = 15 * hz;
4313 
4314         /*
4315          * Read /etc/system.  This is a chicken-and-egg problem because
4316          * kmem_flags may be set in /etc/system, but mod_read_system_file()
4317          * needs to use the allocator.  The simplest solution is to create
4318          * all the standard kmem caches, read /etc/system, destroy all the
4319          * caches we just created, and then create them all again in light
4320          * of the (possibly) new kmem_flags and other kmem tunables.
4321          */
4322         kmem_cache_init(1, 0);
4323 
4324         mod_read_system_file(boothowto & RB_ASKNAME);
4325 
4326         while ((cp = list_tail(&kmem_caches)) != NULL)
4327                 kmem_cache_destroy(cp);
4328 
4329         vmem_destroy(kmem_oversize_arena);
4330 
4331         if (old_kmem_flags & KMF_STICKY)
4332                 kmem_flags = old_kmem_flags;
4333 
4334         if (!(kmem_flags & KMF_AUDIT))
4335                 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4336 
4337         if (kmem_maxverify == 0)
4338                 kmem_maxverify = maxverify;
4339 
4340         if (kmem_minfirewall == 0)
4341                 kmem_minfirewall = minfirewall;
4342 
4343         /*
4344          * Give segkmem a chance to figure out if we are using large pages
4345          * for the kernel heap.
4346          */
4347         use_large_pages = segkmem_lpsetup();
4348 
4349         /*
4350          * To protect against corruption, we keep the actual number of
4351          * callers that KMF_LITE records separate from the tunable.  We
4352          * arbitrarily clamp it to 16, since the overhead for small buffers
4353          * quickly gets out of hand.
4354          *
4355          * The real limit would depend on the needs of the largest KMC_NOHASH
4356          * cache.
4357          */
4358         kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4359         kmem_lite_pcs = kmem_lite_count;
4360 
4361         /*
4362          * Normally, we firewall oversized allocations when possible, but
4363          * if we are using large pages for kernel memory, and we don't have
4364          * any non-LITE debugging flags set, we want to allocate oversized
4365          * buffers from large pages, and so skip the firewalling.
4366          */
4367         if (use_large_pages &&
4368             ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4369                 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4370                     PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4371                     0, VMC_DUMPSAFE | VM_SLEEP);
4372         } else {
4373                 kmem_oversize_arena = vmem_create("kmem_oversize",
4374                     NULL, 0, PAGESIZE,
4375                     segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX ?
4376                     kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4377                     VM_SLEEP);
4378         }
4379 
4380         kmem_cache_init(2, use_large_pages);
4381 
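             /*
              * When enabled below, each log defaults to roughly 2% (1/50) of
              * the memory reported by kmem_maxavail().
              */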
4382         if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4383                 if (kmem_transaction_log_size == 0)
4384                         kmem_transaction_log_size = kmem_maxavail() / 50;
4385                 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4386         }
4387 
4388         if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4389                 if (kmem_content_log_size == 0)
4390                         kmem_content_log_size = kmem_maxavail() / 50;
4391                 kmem_content_log = kmem_log_init(kmem_content_log_size);
4392         }
4393 
4394         kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4395 
4396         kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4397 
4398         /*
4399          * Initialize STREAMS message caches so allocb() is available.
4400          * This allows us to initialize the logging framework (cmn_err(9F),
4401          * strlog(9F), etc.) so we can start recording messages.
4402          */
4403         streams_msg_init();
4404 
4405         /*
4406          * Initialize the ZSD framework in Zones so modules loaded henceforth
4407          * can register their callbacks.
4408          */
4409         zone_zsd_init();
4410 
4411         log_init();
4412         taskq_init();
4413 
4414         /*
4415          * Warn about invalid or dangerous values of kmem_flags.
4416          * Always warn about unsupported values.
4417          */
4418         if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4419             KMF_CONTENTS | KMF_LITE)) != 0) ||
4420             ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4421                 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4422                     "See the Solaris Tunable Parameters Reference Manual.",
4423                     kmem_flags);
4424 
4425 #ifdef DEBUG
4426         if ((kmem_flags & KMF_DEBUG) == 0)
4427                 cmn_err(CE_NOTE, "kmem debugging disabled.");
4428 #else
4429         /*
4430          * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4431          * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4432          * if KMF_AUDIT is set). We should warn the user about the performance
4433          * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4434          * isn't set (since that disables AUDIT).
4435          */
4436         if (!(kmem_flags & KMF_LITE) &&
4437             (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4438                 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4439                     "enabled (kmem_flags = 0x%x).  Performance degradation "
4440                     "and large memory overhead possible. See the Solaris "
4441                     "Tunable Parameters Reference Manual.", kmem_flags);
4442 #endif /* not DEBUG */
4443 
4444         kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4445 
4446         kmem_ready = 1;
4447 
4448         /*
4449          * Initialize the platform-specific aligned/DMA memory allocator.
4450          */
4451         ka_init();
4452 
4453         /*
4454          * Initialize 32-bit ID cache.
4455          */
4456         id32_init();
4457 
4458         /*
4459          * Initialize the networking stack so modules loaded can
4460          * register their callbacks.
4461          */
4462         netstack_init();
4463 }
4464 
4465 static void
4466 kmem_move_init(void)
4467 {
4468         kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4469             sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4470             kmem_msb_arena, KMC_NOHASH);
4471         kmem_move_cache = kmem_cache_create("kmem_move_cache",
4472             sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4473             kmem_msb_arena, KMC_NOHASH);
4474 
4475         /*
4476          * kmem guarantees that move callbacks are sequential and that even
4477          * across multiple caches no two moves ever execute simultaneously.
4478          * Move callbacks are processed on a separate taskq so that client code
4479          * does not interfere with internal maintenance tasks.
4480          */
4481         kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4482             minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4483 }
4484 
4485 void
4486 kmem_thread_init(void)
4487 {
4488         kmem_move_init();
4489         kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4490             300, INT_MAX, TASKQ_PREPOPULATE);
4491 }
4492 
4493 void
4494 kmem_mp_init(void)
4495 {
4496         mutex_enter(&cpu_lock);
4497         register_cpu_setup_func(kmem_cpu_setup, NULL);
4498         mutex_exit(&cpu_lock);
4499 
4500         kmem_update_timeout(NULL);
4501 
4502         taskq_mp_init();
4503 }
4504 
4505 /*
4506  * Return the slab of the allocated buffer, or NULL if the buffer is not
4507  * allocated. This function may be called with a known slab address to determine
4508  * whether or not the buffer is allocated, or with a NULL slab address to obtain
4509  * an allocated buffer's slab.
4510  */
4511 static kmem_slab_t *
4512 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4513 {
4514         kmem_bufctl_t *bcp, *bufbcp;
4515 
4516         ASSERT(MUTEX_HELD(&cp->cache_lock));
4517         ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4518 
4519         if (cp->cache_flags & KMF_HASH) {
4520                 for (bcp = *KMEM_HASH(cp, buf);
4521                     (bcp != NULL) && (bcp->bc_addr != buf);
4522                     bcp = bcp->bc_next) {
4523                         continue;
4524                 }
4525                 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4526                 return (bcp == NULL ? NULL : bcp->bc_slab);
4527         }
4528 
4529         if (sp == NULL) {
4530                 sp = KMEM_SLAB(cp, buf);
4531         }
4532         bufbcp = KMEM_BUFCTL(cp, buf);
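             /* walk the slab's freelist: if buf is absent, it is allocated */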
4533         for (bcp = sp->slab_head;
4534             (bcp != NULL) && (bcp != bufbcp);
4535             bcp = bcp->bc_next) {
4536                 continue;
4537         }
4538         return (bcp == NULL ? sp : NULL);
4539 }
4540 
4541 static boolean_t
4542 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4543 {
4544         long refcnt = sp->slab_refcnt;
4545 
4546         ASSERT(cp->cache_defrag != NULL);
4547 
4548         /*
4549          * For code coverage we want to be able to move an object within the
4550          * same slab (the only partial slab) even if allocating the destination
4551          * buffer resulted in a completely allocated slab.
4552          */
4553         if (flags & KMM_DEBUG) {
4554                 return ((flags & KMM_DESPERATE) ||
4555                     ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4556         }
4557 
4558         /* If we're desperate, we don't care if the client said NO. */
4559         if (flags & KMM_DESPERATE) {
4560                 return (refcnt < sp->slab_chunks); /* any partial */
4561         }
4562 
4563         if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4564                 return (B_FALSE);
4565         }
4566 
4567         if ((refcnt == 1) || kmem_move_any_partial) {
4568                 return (refcnt < sp->slab_chunks);
4569         }
4570 
4571         /*
4572          * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4573          * slabs with a progressively higher percentage of used buffers can be
4574          * reclaimed until the cache as a whole is no longer fragmented.
4575          *
4576          *      sp->slab_refcnt   kmd_reclaim_numer
4577          *      --------------- < ------------------
4578          *      sp->slab_chunks   KMEM_VOID_FRACTION
4579          */
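             /*
              * For instance (assuming KMEM_VOID_FRACTION == 8): with 3 of 16
              * chunks allocated and kmd_reclaim_numer == 2, the test below is
              * 3 * 8 == 24 < 16 * 2 == 32, so the slab is a candidate.
              */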
4580         return ((refcnt * KMEM_VOID_FRACTION) <
4581             (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4582 }
4583 
4584 /*
4585  * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4586  * or when the buffer is freed.
4587  */
4588 static void
4589 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4590 {
4591         ASSERT(MUTEX_HELD(&cp->cache_lock));
4592         ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4593 
4594         if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4595                 return;
4596         }
4597 
4598         if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4599                 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4600                         avl_remove(&cp->cache_partial_slabs, sp);
4601                         sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4602                         sp->slab_stuck_offset = (uint32_t)-1;
4603                         avl_add(&cp->cache_partial_slabs, sp);
4604                 }
4605         } else {
4606                 sp->slab_later_count = 0;
4607                 sp->slab_stuck_offset = (uint32_t)-1;
4608         }
4609 }
4610 
4611 static void
4612 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4613 {
4614         ASSERT(taskq_member(kmem_move_taskq, curthread));
4615         ASSERT(MUTEX_HELD(&cp->cache_lock));
4616         ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4617 
4618         if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4619                 return;
4620         }
4621 
4622         avl_remove(&cp->cache_partial_slabs, sp);
4623         sp->slab_later_count = 0;
4624         sp->slab_flags |= KMEM_SLAB_NOMOVE;
4625         sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4626         avl_add(&cp->cache_partial_slabs, sp);
4627 }
4628 
4629 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4630 
4631 /*
4632  * The move callback takes two buffer addresses, the buffer to be moved, and a
4633  * newly allocated and constructed buffer selected by kmem as the destination.
4634  * It also takes the size of the buffer and an optional user argument specified
4635  * at cache creation time. kmem guarantees that the buffer to be moved has not
4636  * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4637  * guarantee the present whereabouts of the buffer to be moved, so it is up to
4638  * the client to safely determine whether or not it is still using the buffer.
4639  * The client must not free either of the buffers passed to the move callback,
4640  * since kmem wants to free them directly to the slab layer. The client response
4641  * tells kmem which of the two buffers to free:
4642  *
4643  * YES          kmem frees the old buffer (the move was successful)
4644  * NO           kmem frees the new buffer, marks the slab of the old buffer
4645  *              non-reclaimable to avoid bothering the client again
4646  * LATER        kmem frees the new buffer, increments slab_later_count
4647  * DONT_KNOW    kmem frees the new buffer
4648  * DONT_NEED    kmem frees both the old buffer and the new buffer
4649  *
4650  * The pending callback argument now being processed contains both of the
4651  * buffers (old and new) passed to the move callback function, the slab of the
4652  * old buffer, and flags related to the move request, such as whether or not the
4653  * system was desperate for memory.
4654  *
4655  * Slabs are not freed while there is a pending callback, but instead are kept
4656  * on a deadlist, which is drained after the last callback completes. This means
4657  * that slabs are safe to access until kmem_move_end(), no matter how many of
4658  * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4659  * zero for as long as the slab remains on the deadlist and until the slab is
4660  * freed.
4661  */
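     /*
      * As a hedged sketch only (object_t, OBJECT_MAGIC, o_magic, o_self, and
      * o_lock are hypothetical client constructs, not kmem interfaces), a
      * conservative move callback shaped by the response table above might
      * look like this, with error handling and cross-thread hand-off elided:
      *
      *	static kmem_cbrc_t
      *	object_move(void *old, void *new, size_t size, void *arg)
      *	{
      *		object_t *op = old, *np = new;
      *
      *		if (op->o_magic != OBJECT_MAGIC)
      *			return (KMEM_CBRC_DONT_KNOW);
      *		if (!mutex_tryenter(&op->o_lock))
      *			return (KMEM_CBRC_LATER);
      *		bcopy(op, np, size);
      *		np->o_self = np;
      *		mutex_exit(&op->o_lock);
      *		return (KMEM_CBRC_YES);
      *	}
      *
      * Returning YES tells kmem to free the old buffer, so the client must
      * first guarantee that no other thread can still reach it.
      */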
4662 static void
4663 kmem_move_buffer(kmem_move_t *callback)
4664 {
4665         kmem_cbrc_t response;
4666         kmem_slab_t *sp = callback->kmm_from_slab;
4667         kmem_cache_t *cp = sp->slab_cache;
4668         boolean_t free_on_slab;
4669 
4670         ASSERT(taskq_member(kmem_move_taskq, curthread));
4671         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4672         ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4673 
4674         /*
4675          * The number of allocated buffers on the slab may have changed since we
4676          * last checked the slab's reclaimability (when the pending move was
4677          * enqueued), or the client may have responded NO when asked to move
4678          * another buffer on the same slab.
4679          */
4680         if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4681                 kmem_slab_free(cp, callback->kmm_to_buf);
4682                 kmem_move_end(cp, callback);
4683                 return;
4684         }
4685 
4686         /*
4687          * Checking the slab layer is easy, so we might as well do that here
4688          * in case we can avoid bothering the client.
4689          */
4690         mutex_enter(&cp->cache_lock);
4691         free_on_slab = (kmem_slab_allocated(cp, sp,
4692             callback->kmm_from_buf) == NULL);
4693         mutex_exit(&cp->cache_lock);
4694 
4695         if (free_on_slab) {
4696                 kmem_slab_free(cp, callback->kmm_to_buf);
4697                 kmem_move_end(cp, callback);
4698                 return;
4699         }
4700 
4701         if (cp->cache_flags & KMF_BUFTAG) {
4702                 /*
4703                  * Make kmem_cache_alloc_debug() apply the constructor for us.
4704                  */
4705                 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4706                     KM_NOSLEEP, 1, caller()) != 0) {
4707                         kmem_move_end(cp, callback);
4708                         return;
4709                 }
4710         } else if (cp->cache_constructor != NULL &&
4711             cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4712             KM_NOSLEEP) != 0) {
4713                 atomic_inc_64(&cp->cache_alloc_fail);
4714                 kmem_slab_free(cp, callback->kmm_to_buf);
4715                 kmem_move_end(cp, callback);
4716                 return;
4717         }
4718 
4719         cp->cache_defrag->kmd_callbacks++;
4720         cp->cache_defrag->kmd_thread = curthread;
4721         cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4722         cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4723         DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4724             callback);
4725 
4726         response = cp->cache_move(callback->kmm_from_buf,
4727             callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4728 
4729         DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4730             callback, kmem_cbrc_t, response);
4731         cp->cache_defrag->kmd_thread = NULL;
4732         cp->cache_defrag->kmd_from_buf = NULL;
4733         cp->cache_defrag->kmd_to_buf = NULL;
4734 
4735         if (response == KMEM_CBRC_YES) {
4736                 cp->cache_defrag->kmd_yes++;
4737                 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4738                 /* slab safe to access until kmem_move_end() */
4739                 if (sp->slab_refcnt == 0)
4740                         cp->cache_defrag->kmd_slabs_freed++;
4741                 mutex_enter(&cp->cache_lock);
4742                 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4743                 mutex_exit(&cp->cache_lock);
4744                 kmem_move_end(cp, callback);
4745                 return;
4746         }
4747 
4748         switch (response) {
4749         case KMEM_CBRC_NO:
4750                 cp->cache_defrag->kmd_no++;
4751                 mutex_enter(&cp->cache_lock);
4752                 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4753                 mutex_exit(&cp->cache_lock);
4754                 break;
4755         case KMEM_CBRC_LATER:
4756                 cp->cache_defrag->kmd_later++;
4757                 mutex_enter(&cp->cache_lock);
4758                 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4759                         mutex_exit(&cp->cache_lock);
4760                         break;
4761                 }
4762 
4763                 if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4764                         kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4765                 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4766                         sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4767                             callback->kmm_from_buf);
4768                 }
4769                 mutex_exit(&cp->cache_lock);
4770                 break;
4771         case KMEM_CBRC_DONT_NEED:
4772                 cp->cache_defrag->kmd_dont_need++;
4773                 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4774                 if (sp->slab_refcnt == 0)
4775                         cp->cache_defrag->kmd_slabs_freed++;
4776                 mutex_enter(&cp->cache_lock);
4777                 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4778                 mutex_exit(&cp->cache_lock);
4779                 break;
4780         case KMEM_CBRC_DONT_KNOW:
4781                 /*
4782                  * If we don't know if we can move this buffer or not, we'll
4783                  * just assume that we can't:  if the buffer is in fact free,
4784                  * then it is sitting in one of the per-CPU magazines or in
4785                  * a full magazine in the depot layer.  Either way, because
4786                  * defrag is induced in the same logic that reaps a cache,
4787                  * it's likely that full magazines will be returned to the
4788                  * system soon (thereby accomplishing what we're trying to
4789                  * accomplish here: return those magazines to their slabs).
4790                  * Given this, any work that we might do now to locate a buffer
4791                  * in a magazine is wasted (and expensive!) work; we bump
4792                  * a counter in this case and otherwise assume that we can't
4793                  * move it.
4794                  */
4795                 cp->cache_defrag->kmd_dont_know++;
4796                 break;
4797         default:
4798                 panic("'%s' (%p) unexpected move callback response %d\n",
4799                     cp->cache_name, (void *)cp, response);
4800         }
4801 
4802         kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4803         kmem_move_end(cp, callback);
4804 }
4805 
4806 /* Return B_FALSE if there is insufficient memory for the move request. */
4807 static boolean_t
4808 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4809 {
4810         void *to_buf;
4811         avl_index_t index;
4812         kmem_move_t *callback, *pending;
4813         ulong_t n;
4814 
4815         ASSERT(taskq_member(kmem_taskq, curthread));
4816         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4817         ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4818 
4819         callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4820 
4821         if (callback == NULL)
4822                 return (B_FALSE);
4823 
4824         callback->kmm_from_slab = sp;
4825         callback->kmm_from_buf = buf;
4826         callback->kmm_flags = flags;
4827 
4828         mutex_enter(&cp->cache_lock);
4829 
4830         n = avl_numnodes(&cp->cache_partial_slabs);
4831         if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
4832                 mutex_exit(&cp->cache_lock);
4833                 kmem_cache_free(kmem_move_cache, callback);
4834                 return (B_TRUE); /* there is no need for the move request */
4835         }
4836 
4837         pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
4838         if (pending != NULL) {
4839                 /*
4840                  * If the move is already pending and we're desperate now,
4841                  * update the move flags.
4842                  */
4843                 if (flags & KMM_DESPERATE) {
4844                         pending->kmm_flags |= KMM_DESPERATE;
4845                 }
4846                 mutex_exit(&cp->cache_lock);
4847                 kmem_cache_free(kmem_move_cache, callback);
4848                 return (B_TRUE);
4849         }
4850 
4851         to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
4852             B_FALSE);
4853         callback->kmm_to_buf = to_buf;
4854         avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
4855 
4856         mutex_exit(&cp->cache_lock);
4857 
4858         if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
4859             callback, TQ_NOSLEEP)) {
4860                 mutex_enter(&cp->cache_lock);
4861                 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4862                 mutex_exit(&cp->cache_lock);
4863                 kmem_slab_free(cp, to_buf);
4864                 kmem_cache_free(kmem_move_cache, callback);
4865                 return (B_FALSE);
4866         }
4867 
4868         return (B_TRUE);
4869 }
4870 
4871 static void
4872 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
4873 {
4874         avl_index_t index;
4875 
4876         ASSERT(cp->cache_defrag != NULL);
4877         ASSERT(taskq_member(kmem_move_taskq, curthread));
4878         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4879 
4880         mutex_enter(&cp->cache_lock);
4881         VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
4882             callback->kmm_from_buf, &index) != NULL);
4883         avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
4884         if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
4885                 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
4886                 kmem_slab_t *sp;
4887 
4888                 /*
4889                  * The last pending move completed. Release all slabs from the
4890                  * front of the dead list except for any slab at the tail that
4891                  * needs to be released from the context of kmem_move_buffers().
4892                  * kmem deferred unmapping the buffers on these slabs in order
4893                  * to guarantee that buffers passed to the move callback have
4894                  * been touched only by kmem or by the client itself.
4895                  */
4896                 while ((sp = list_remove_head(deadlist)) != NULL) {
4897                         if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
4898                                 list_insert_tail(deadlist, sp);
4899                                 break;
4900                         }
4901                         cp->cache_defrag->kmd_deadcount--;
4902                         cp->cache_slab_destroy++;
4903                         mutex_exit(&cp->cache_lock);
4904                         kmem_slab_destroy(cp, sp);
4905                         mutex_enter(&cp->cache_lock);
4906                 }
4907         }
4908         mutex_exit(&cp->cache_lock);
4909         kmem_cache_free(kmem_move_cache, callback);
4910 }
4911 
4912 /*
4913  * Move buffers from least used slabs first by scanning backwards from the end
4914  * of the partial slab list. Scan at most max_scan candidate slabs and move
4915  * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
4916  * If desperate to reclaim memory, move buffers from any partial slab, otherwise
4917  * skip slabs with a ratio of allocated buffers at or above the current
4918  * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
4919  * scan is aborted) so that the caller can adjust the reclaimability threshold
4920  * depending on how many reclaimable slabs it finds.
4921  *
4922  * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
4923  * move request, since it is not valid for kmem_move_begin() to call
4924  * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
4925  */
4926 static int
4927 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
4928     int flags)
4929 {
4930         kmem_slab_t *sp;
4931         void *buf;
4932         int i, j; /* slab index, buffer index */
4933         int s; /* reclaimable slabs */
4934         int b; /* allocated (movable) buffers on reclaimable slab */
4935         boolean_t success;
4936         int refcnt;
4937         int nomove;
4938 
4939         ASSERT(taskq_member(kmem_taskq, curthread));
4940         ASSERT(MUTEX_HELD(&cp->cache_lock));
4941         ASSERT(kmem_move_cache != NULL);
4942         ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
4943         ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
4944             avl_numnodes(&cp->cache_partial_slabs) > 1);
4945 
4946         if (kmem_move_blocked) {
4947                 return (0);
4948         }
4949 
4950         if (kmem_move_fulltilt) {
4951                 flags |= KMM_DESPERATE;
4952         }
4953 
4954         if (max_scan == 0 || (flags & KMM_DESPERATE)) {
4955                 /*
4956                  * Scan as many slabs as needed to find the desired number of
4957                  * candidate slabs.
4958                  */
4959                 max_scan = (size_t)-1;
4960         }
4961 
4962         if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
4963                 /* Find as many candidate slabs as possible. */
4964                 max_slabs = (size_t)-1;
4965         }
4966 
4967         sp = avl_last(&cp->cache_partial_slabs);
4968         ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
4969         for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
4970             ((sp != avl_first(&cp->cache_partial_slabs)) ||
4971             (flags & KMM_DEBUG));
4972             sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
4973 
4974                 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
4975                         continue;
4976                 }
4977                 s++;
4978 
4979                 /* Look for allocated buffers to move. */
4980                 for (j = 0, b = 0, buf = sp->slab_base;
4981                     (j < sp->slab_chunks) && (b < sp->slab_refcnt);
4982                     buf = (((char *)buf) + cp->cache_chunksize), j++) {
4983 
4984                         if (kmem_slab_allocated(cp, sp, buf) == NULL) {
4985                                 continue;
4986                         }
4987 
4988                         b++;
4989 
4990                         /*
4991                          * Prevent the slab from being destroyed while we drop
4992                          * cache_lock and while the pending move is not yet
4993                          * registered. Flag the pending move while
4994                          * kmd_moves_pending may still be empty, since we can't
4995                          * yet rely on a non-zero pending move count to prevent
4996                          * the slab from being destroyed.
4997                          */
4998                         ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
4999                         sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5000                         /*
5001                          * Recheck refcnt and nomove after reacquiring the lock,
5002                          * since these control the order of partial slabs, and
5003                          * we want to know if we can pick up the scan where we
5004                          * left off.
5005                          */
5006                         refcnt = sp->slab_refcnt;
5007                         nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5008                         mutex_exit(&cp->cache_lock);

			success = kmem_move_begin(cp, sp, buf, flags);

			/*
			 * Now, before the lock is reacquired, kmem could
			 * process all pending move requests and purge the
			 * deadlist, so that upon reacquiring the lock, sp has
			 * been remapped. Or, the client may free all the
			 * objects on the slab while the pending moves are still
			 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
			 * flag causes the slab to be put at the end of the
			 * deadlist and prevents it from being destroyed, since
			 * we plan to destroy it here after reacquiring the
			 * lock.
			 */
			mutex_enter(&cp->cache_lock);
			ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
			sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;

			if (sp->slab_refcnt == 0) {
				list_t *deadlist =
				    &cp->cache_defrag->kmd_deadlist;
				list_remove(deadlist, sp);

				if (!avl_is_empty(
				    &cp->cache_defrag->kmd_moves_pending)) {
					/*
					 * A pending move makes it unsafe to
					 * destroy the slab, because even though
					 * the move is no longer needed, the
					 * context where that is determined
					 * requires the slab to exist.
					 * Fortunately, a pending move also
					 * means we don't need to destroy the
					 * slab here, since it will get
					 * destroyed along with any other slabs
					 * on the deadlist after the last
					 * pending move completes.
					 */
					list_insert_head(deadlist, sp);
					return (-1);
				}

				/*
				 * Destroy the slab now if it was completely
				 * freed while we dropped cache_lock and there
				 * are no pending moves. Since slab_refcnt
				 * cannot change once it reaches zero, no new
				 * pending moves from that slab are possible.
				 */
				cp->cache_defrag->kmd_deadcount--;
				cp->cache_slab_destroy++;
				mutex_exit(&cp->cache_lock);
				kmem_slab_destroy(cp, sp);
				mutex_enter(&cp->cache_lock);
				/*
				 * Since we can't pick up the scan where we left
				 * off, abort the scan and say nothing about the
				 * number of reclaimable slabs.
				 */
				return (-1);
			}

			if (!success) {
				/*
				 * Abort the scan if there is not enough memory
				 * for the request and say nothing about the
				 * number of reclaimable slabs.
				 */
				return (-1);
			}

			/*
			 * The slab's position changed while the lock was
			 * dropped, so we don't know where we are in the
			 * sequence any more.
			 */
			if (sp->slab_refcnt != refcnt) {
				/*
				 * If this is a KMM_DEBUG move, the slab_refcnt
				 * may have changed because we allocated a
				 * destination buffer on the same slab. In that
				 * case, we're not interested in counting it.
				 */
				return (-1);
			}
			if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove)
				return (-1);

			/*
			 * Generating a move request allocates a destination
			 * buffer from the slab layer, bumping the first partial
			 * slab if it is completely allocated. If the current
			 * slab becomes the first partial slab as a result, we
			 * can't continue to scan backwards.
			 *
			 * If this is a KMM_DEBUG move and we allocated the
			 * destination buffer from the last partial slab, then
			 * the buffer we're moving is on the same slab and our
			 * slab_refcnt has changed, causing us to return before
			 * reaching here if there are no partial slabs left.
			 */
			ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
			if (sp == avl_first(&cp->cache_partial_slabs)) {
				/*
				 * We're not interested in a second KMM_DEBUG
				 * move.
				 */
				goto end_scan;
			}
		}
	}
end_scan:

	return (s);
}

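/*
 * Arguments for the asynchronous notify task; heap-allocated so they survive
 * the caller's stack frame until kmem_taskq runs
 * kmem_cache_move_notify_task().
 */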
typedef struct kmem_move_notify_args {
	kmem_cache_t *kmna_cache;
	void *kmna_buf;
} kmem_move_notify_args_t;

static void
kmem_cache_move_notify_task(void *arg)
{
	kmem_move_notify_args_t *args = arg;
	kmem_cache_t *cp = args->kmna_cache;
	void *buf = args->kmna_buf;
	kmem_slab_t *sp;

	ASSERT(taskq_member(kmem_taskq, curthread));
	ASSERT(list_link_active(&cp->cache_link));

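	/*
	 * The args were allocated KM_NOSLEEP by kmem_cache_move_notify();
	 * now that the cache and buffer pointers have been copied out, we no
	 * longer need them.
	 */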
	kmem_free(args, sizeof (kmem_move_notify_args_t));
	mutex_enter(&cp->cache_lock);
	sp = kmem_slab_allocated(cp, NULL, buf);

	/* Ignore the notification if the buffer is no longer allocated. */
	if (sp == NULL) {
		mutex_exit(&cp->cache_lock);
		return;
	}

	/* Ignore the notification if there's no reason to move the buffer. */
	if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
		/*
		 * The notification has not been ruled out yet. Ignore it
		 * unless the slab was marked by an earlier refusal to move a
		 * buffer (either the NOMOVE flag or a nonzero "later" count).
		 */
		if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
		    (sp->slab_later_count == 0)) {
			mutex_exit(&cp->cache_lock);
			return;
		}

		kmem_slab_move_yes(cp, sp, buf);
		ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
		sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
		mutex_exit(&cp->cache_lock);
		/* see kmem_move_buffers() about dropping the lock */
		(void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
		mutex_enter(&cp->cache_lock);
		ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
		sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
		if (sp->slab_refcnt == 0) {
			list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
			list_remove(deadlist, sp);

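			/*
			 * As in kmem_move_buffers(): a pending move makes it
			 * unsafe to destroy the slab here, and also makes it
			 * unnecessary, since the slab will be destroyed from
			 * the deadlist after the last pending move completes.
			 */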
			if (!avl_is_empty(
			    &cp->cache_defrag->kmd_moves_pending)) {
				list_insert_head(deadlist, sp);
				mutex_exit(&cp->cache_lock);
				return;
			}

			cp->cache_defrag->kmd_deadcount--;
			cp->cache_slab_destroy++;
			mutex_exit(&cp->cache_lock);
			kmem_slab_destroy(cp, sp);
			return;
		}
	} else {
		kmem_slab_move_yes(cp, sp, buf);
	}
	mutex_exit(&cp->cache_lock);
}

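/*
 * Entry point for clients to report that a buffer has become movable, e.g.
 * after the cache's move callback returned KMEM_CBRC_LATER for it. A minimal
 * sketch of a caller, assuming a hypothetical object type whose o_busy flag
 * is what its move callback checks:
 *
 *	mutex_enter(&object->o_lock);
 *	object->o_busy = B_FALSE;
 *	mutex_exit(&object->o_lock);
 *	kmem_cache_move_notify(object_cache, object);
 *
 * The notification is advisory, so it is dispatched to kmem_taskq without
 * sleeping and quietly dropped if memory is tight; at worst the buffer waits
 * for a later consolidator scan.
 */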
void
kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
{
	kmem_move_notify_args_t *args;

	args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
	if (args != NULL) {
		args->kmna_cache = cp;
		args->kmna_buf = buf;
		/* Drop the (advisory) notification if the dispatch fails. */
		if (!taskq_dispatch(kmem_taskq,
		    (task_func_t *)kmem_cache_move_notify_task, args,
		    TQ_NOSLEEP))
			kmem_free(args, sizeof (kmem_move_notify_args_t));
	}
}

static void
kmem_cache_defrag(kmem_cache_t *cp)
{
	size_t n;

	ASSERT(cp->cache_defrag != NULL);

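	/*
	 * We were explicitly asked to defragment, presumably because the
	 * system needs memory back, so pass KMM_DESPERATE to have
	 * kmem_move_buffers() move whatever it can rather than only the
	 * likeliest candidates.
	 */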
	mutex_enter(&cp->cache_lock);
	n = avl_numnodes(&cp->cache_partial_slabs);
	if (n > 1) {
		/* kmem_move_buffers() drops and reacquires cache_lock */
		cp->cache_defrag->kmd_defrags++;
		(void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
	}
	mutex_exit(&cp->cache_lock);
}

/* Is this cache above the fragmentation threshold? */
static boolean_t
kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
{
	/*
	 * The cache is considered fragmented if
	 *
	 *       nfree            kmem_frag_numer
	 * ------------------  >  ---------------
	 * cp->cache_buftotal     kmem_frag_denom
	 *
	 * For example, with an illustrative ratio of 1/8, a cache with more
	 * than one-eighth of its buffers free in the slab layer is over the
	 * threshold. The comparison below cross-multiplies to avoid integer
	 * division.
	 */
	return ((nfree * kmem_frag_denom) >
	    (cp->cache_buftotal * kmem_frag_numer));
}

static boolean_t
kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
{
	boolean_t fragmented;
	uint64_t nfree;

	ASSERT(MUTEX_HELD(&cp->cache_lock));
	*doreap = B_FALSE;

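	/*
	 * kmem_move_fulltilt is a tunable that bypasses both the minimum
	 * slab count and the fragmentation threshold: with it set, any cache
	 * that has more than one partial slab is treated as fragmented.
	 */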
	if (kmem_move_fulltilt) {
		if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
			return (B_TRUE);
		}
	} else {
		if ((cp->cache_complete_slab_count + avl_numnodes(
		    &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
			return (B_FALSE);
		}
	}

	nfree = cp->cache_bufslab;
	fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
	    kmem_cache_frag_threshold(cp, nfree));

	/*
	 * Free buffers in the magazine layer appear allocated from the
	 * point of view of the slab layer. We want to know if the slab
	 * layer would appear fragmented if we included free buffers from
	 * magazines that have fallen out of the working set.
	 */
	if (!fragmented) {
		long reap;

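		/*
		 * Estimate how many full magazines could be reaped from the
		 * depot: ml_min is the interval's low-water mark of full
		 * magazines (any never needed during the interval are outside
		 * the working set), capped by ml_reaplimit and by the total
		 * on hand.
		 */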
		mutex_enter(&cp->cache_depot_lock);
		reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
		reap = MIN(reap, cp->cache_full.ml_total);
		mutex_exit(&cp->cache_depot_lock);

		nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
		if (kmem_cache_frag_threshold(cp, nfree)) {
			*doreap = B_TRUE;
		}
	}

	return (fragmented);
}

/* Called periodically from kmem_taskq */
static void
kmem_cache_scan(kmem_cache_t *cp)
{
	boolean_t reap = B_FALSE;
	kmem_defrag_t *kmd;

	ASSERT(taskq_member(kmem_taskq, curthread));

	mutex_enter(&cp->cache_lock);

	kmd = cp->cache_defrag;
	if (kmd->kmd_consolidate > 0) {
		kmd->kmd_consolidate--;
		mutex_exit(&cp->cache_lock);
		kmem_cache_reap(cp);
		return;
	}

	if (kmem_cache_is_fragmented(cp, &reap)) {
		long slabs_found;

		/*
		 * Consolidate reclaimable slabs from the end of the partial
		 * slab list (scan at most kmem_reclaim_scan_range slabs to
		 * find reclaimable slabs). Keep track of how many candidate
		 * slabs we looked for and how many we actually found so we
		 * can adjust the definition of a candidate slab if we're
		 * having trouble finding them.
		 *
		 * kmem_move_buffers() drops and reacquires cache_lock.
		 */
		kmd->kmd_scans++;
		slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
		    kmem_reclaim_max_slabs, 0);
		/*
		 * slabs_found must be signed: kmem_move_buffers() returns -1
		 * when it aborts a scan, and a size_t here would make the
		 * test below vacuously true.
		 */
		if (slabs_found >= 0) {
			kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
			kmd->kmd_slabs_found += slabs_found;
		}

		if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
			kmd->kmd_tries = 0;

			/*
			 * Adjust the reclaimability threshold based on the
			 * previous scans: if we found every candidate slab we
			 * sought, be more selective; if we found fewer than
			 * half of them, make candidates easier to find.
			 */
			if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, -1);
			} else if ((kmd->kmd_slabs_found * 2) <
			    kmd->kmd_slabs_sought) {
				kmem_adjust_reclaim_threshold(kmd, 1);
			}
			kmd->kmd_slabs_sought = 0;
			kmd->kmd_slabs_found = 0;
		}
	} else {
		kmem_reset_reclaim_threshold(cp->cache_defrag);
#ifdef	DEBUG
		if (!avl_is_empty(&cp->cache_partial_slabs)) {
			/*
			 * In a debug kernel we want the consolidator to
			 * run occasionally even when there is plenty of
			 * memory.
			 */
			uint16_t debug_rand;

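			/*
			 * With debug_rand uniform, each scan has roughly a
			 * 1/kmem_mtb_reap chance of a debug reap (unless
			 * kmem_move_noreap is set) and a 1/kmem_mtb_move
			 * chance of a debug move ("mtb" reads as "mean time
			 * between").
			 */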
			(void) random_get_bytes((uint8_t *)&debug_rand, 2);
			if (!kmem_move_noreap &&
			    ((debug_rand % kmem_mtb_reap) == 0)) {
				mutex_exit(&cp->cache_lock);
				kmem_cache_reap(cp);
				return;
			} else if ((debug_rand % kmem_mtb_move) == 0) {
				kmd->kmd_scans++;
				(void) kmem_move_buffers(cp,
				    kmem_reclaim_scan_range, 1, KMM_DEBUG);
			}
		}
#endif	/* DEBUG */
	}

	mutex_exit(&cp->cache_lock);

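	/*
	 * Reap the depot's excess magazines (noticed above in
	 * kmem_cache_is_fragmented()) now that cache_lock has been dropped.
	 */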
	if (reap)
		kmem_depot_ws_reap(cp);
}