1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  24  * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
  25  * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
  26  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  27  */
  28 
  29 /*
  30  * DVA-based Adjustable Replacement Cache
  31  *
  32  * While much of the theory of operation used here is
  33  * based on the self-tuning, low overhead replacement cache
  34  * presented by Megiddo and Modha at FAST 2003, there are some
  35  * significant differences:
  36  *
  37  * 1. The Megiddo and Modha model assumes any page is evictable.
  38  * Pages in its cache cannot be "locked" into memory.  This makes
  39  * the eviction algorithm simple: evict the last page in the list.
  40  * This also makes the performance characteristics easy to reason
  41  * about.  Our cache is not so simple.  At any given moment, some
  42  * subset of the blocks in the cache are un-evictable because we
  43  * have handed out a reference to them.  Blocks are only evictable
  44  * when there are no external references active.  This makes
  45  * eviction far more problematic:  we choose to evict the evictable
  46  * blocks that are the "lowest" in the list.
  47  *
  48  * There are times when it is not possible to evict the requested
  49  * space.  In these circumstances we are unable to adjust the cache
  50  * size.  To prevent the cache from growing unbounded at these times, we
  51  * implement a "cache throttle" that slows the flow of new data
  52  * into the cache until we can make space available.
  53  *
  54  * 2. The Megiddo and Modha model assumes a fixed cache size.
  55  * Pages are evicted when the cache is full and there is a cache
  56  * miss.  Our model has a variable sized cache.  It grows with
  57  * high use, but also tries to react to memory pressure from the
  58  * operating system: decreasing its size when system memory is
  59  * tight.
  60  *
  61  * 3. The Megiddo and Modha model assumes a fixed page size. All
  62  * elements of the cache are therefore exactly the same size.  So
  63  * when adjusting the cache size following a cache miss, it's simply
  64  * a matter of choosing a single page to evict.  In our model, we
  65  * have variable sized cache blocks (ranging from 512 bytes to
  66  * 128K bytes).  We therefore choose a set of blocks to evict to make
  67  * space for a cache miss that approximates as closely as possible
  68  * the space used by the new block.
  69  *
  70  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
  71  * by N. Megiddo & D. Modha, FAST 2003
  72  */
  73 
  74 /*
  75  * The locking model:
  76  *
  77  * A new reference to a cache buffer can be obtained in two
  78  * ways: 1) via a hash table lookup using the DVA as a key,
  79  * or 2) via one of the ARC lists.  The arc_read() interface
  80  * uses method 1, while the internal arc algorithms for
  81  * adjusting the cache use method 2.  We therefore provide two
  82  * types of locks: 1) the hash table lock array, and 2) the
  83  * arc list locks.
  84  *
  85  * Buffers do not have their own mutexes, rather they rely on the
  86  * hash table mutexes for the bulk of their protection (i.e. most
  87  * fields in the arc_buf_hdr_t are protected by these mutexes).
  88  *
  89  * buf_hash_find() returns the appropriate mutex (held) when it
  90  * locates the requested buffer in the hash table.  It returns
  91  * NULL for the mutex if the buffer was not in the table.
  92  *
  93  * buf_hash_remove() expects the appropriate hash mutex to be
  94  * already held before it is invoked.
  95  *
  96  * Each arc state also has a mutex which is used to protect the
  97  * buffer list associated with the state.  When attempting to
  98  * obtain a hash table lock while holding an arc list lock you
  99  * must use mutex_tryenter() to avoid deadlock.  Also note that
 100  * the active state mutex must be held before the ghost state mutex.
 101  *
 102  * Arc buffers may have an associated eviction callback function.
 103  * This function will be invoked prior to removing the buffer (e.g.
 104  * in arc_do_user_evicts()).  Note however that the data associated
 105  * with the buffer may be evicted prior to the callback.  The callback
 106  * must be made with *no locks held* (to prevent deadlock).  Additionally,
 107  * the users of callbacks must ensure that their private data is
 108  * protected from simultaneous callbacks from arc_clear_callback()
 109  * and arc_do_user_evicts().
 110  *
 111  * Note that the majority of the performance stats are manipulated
 112  * with atomic operations.
 113  *
 114  * The L2ARC uses the l2ad_mtx on each vdev for the following:
 115  *
 116  *      - L2ARC buflist creation
 117  *      - L2ARC buflist eviction
 118  *      - L2ARC write completion, which walks L2ARC buflists
 119  *      - ARC header destruction, as it removes from L2ARC buflists
 120  *      - ARC header release, as it removes from L2ARC buflists
 121  */
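
/*
 * Illustrative sketch of the lock ordering rule above (not a function in
 * this file; arc_evict_state_impl(), defined later, is the real consumer
 * of this pattern).  While an arc list (sublist) lock is held, a hash
 * table lock may only be taken with mutex_tryenter(), and on failure the
 * header is skipped rather than waited on:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;	(move on to the next header)
 *	}
 *	... evict or move the header ...
 *	mutex_exit(hash_lock);
 */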
 122 
 123 #include <sys/spa.h>
 124 #include <sys/zio.h>
 125 #include <sys/zio_compress.h>
 126 #include <sys/zfs_context.h>
 127 #include <sys/arc.h>
 128 #include <sys/refcount.h>
 129 #include <sys/vdev.h>
 130 #include <sys/vdev_impl.h>
 131 #include <sys/dsl_pool.h>
 132 #include <sys/multilist.h>
 133 #ifdef _KERNEL
 134 #include <sys/vmsystm.h>
 135 #include <vm/anon.h>
 136 #include <sys/fs/swapnode.h>
 137 #include <sys/dnlc.h>
 138 #endif
 139 #include <sys/callb.h>
 140 #include <sys/kstat.h>
 141 #include <zfs_fletcher.h>
 142 
 143 #ifndef _KERNEL
 144 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
 145 boolean_t arc_watch = B_FALSE;
 146 int arc_procfd;
 147 #endif
 148 
 149 static kmutex_t         arc_reclaim_lock;
 150 static kcondvar_t       arc_reclaim_thread_cv;
 151 static boolean_t        arc_reclaim_thread_exit;
 152 static kcondvar_t       arc_reclaim_waiters_cv;
 153 
 154 static kmutex_t         arc_user_evicts_lock;
 155 static kcondvar_t       arc_user_evicts_cv;
 156 static boolean_t        arc_user_evicts_thread_exit;
 157 
 158 uint_t arc_reduce_dnlc_percent = 3;
 159 
 160 /*
 161  * The number of headers to evict in arc_evict_state_impl() before
 162  * dropping the sublist lock and evicting from another sublist. A lower
 163  * value means we're more likely to evict the "correct" header (i.e. the
 164  * oldest header in the arc state), but comes with higher overhead
 165  * (i.e. more invocations of arc_evict_state_impl()).
 166  */
 167 int zfs_arc_evict_batch_limit = 10;
 168 
 169 /*
 170  * The number of sublists used for each of the arc state lists. If this
 171  * is not set to a suitable value by the user, it will be configured to
 172  * the number of CPUs on the system in arc_init().
 173  */
 174 int zfs_arc_num_sublists_per_state = 0;
 175 
 176 /* number of seconds before growing cache again */
 177 static int              arc_grow_retry = 60;
 178 
 179 /* shift of arc_c for calculating overflow limit in arc_get_data_buf */
 180 int             zfs_arc_overflow_shift = 8;
 181 
 182 /* shift of arc_c for calculating both min and max arc_p */
 183 static int              arc_p_min_shift = 4;
 184 
 185 /* log2(fraction of arc to reclaim) */
 186 static int              arc_shrink_shift = 7;
 187 
 188 /*
 189  * log2(fraction of ARC which must be free to allow growing).
 190  * I.e., if there is less than arc_c >> arc_no_grow_shift free memory,
 191  * when reading a new block into the ARC, we will evict an equal-sized block
 192  * from the ARC.
 193  *
 194  * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 195  * we will still not allow it to grow.
 196  */
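/*
 * Worked example, assuming the default value of 5 assigned below: with a
 * target cache size (arc_c) of 4 GB, growth stops once less than
 * 4 GB >> 5 = 128 MB of memory is free; from that point each newly read
 * block is paid for by evicting an equal-sized block instead.
 */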
 197 int                     arc_no_grow_shift = 5;
 198 
 199 
 200 /*
 201  * minimum lifespan of a prefetch block in clock ticks
 202  * (initialized in arc_init())
 203  */
 204 static int              arc_min_prefetch_lifespan;
 205 
 206 /*
 207  * If this percent of memory is free, don't throttle.
 208  */
 209 int arc_lotsfree_percent = 10;
 210 
 211 static int arc_dead;
 212 
 213 /*
 214  * The arc has filled available memory and has now warmed up.
 215  */
 216 static boolean_t arc_warm;
 217 
 218 /*
 219  * These tunables are for performance analysis.
 220  */
 221 uint64_t zfs_arc_max;
 222 uint64_t zfs_arc_min;
 223 uint64_t zfs_arc_meta_limit = 0;
 224 uint64_t zfs_arc_meta_min = 0;
 225 int zfs_arc_grow_retry = 0;
 226 int zfs_arc_shrink_shift = 0;
 227 int zfs_arc_p_min_shift = 0;
 228 int zfs_disable_dup_eviction = 0;
 229 int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
 230 
 231 /*
 232  * Note that buffers can be in one of 6 states:
 233  *      ARC_anon        - anonymous (discussed below)
 234  *      ARC_mru         - recently used, currently cached
 235  *      ARC_mru_ghost   - recently used, no longer in cache
 236  *      ARC_mfu         - frequently used, currently cached
 237  *      ARC_mfu_ghost   - frequently used, no longer in cache
 238  *      ARC_l2c_only    - exists in L2ARC but not other states
 239  * When there are no active references to the buffer, they are
 240  * linked onto a list in one of these arc states.  These are
 241  * the only buffers that can be evicted or deleted.  Within each
 242  * state there are multiple lists, one for meta-data and one for
 243  * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 244  * etc.) is tracked separately so that it can be managed more
 245  * explicitly: favored over data, limited explicitly.
 246  *
 247  * Anonymous buffers are buffers that are not associated with
 248  * a DVA.  These are buffers that hold dirty block copies
 249  * before they are written to stable storage.  By definition,
 250  * they are "ref'd" and are considered part of arc_mru
 251  * that cannot be freed.  Generally, they will acquire a DVA
 252  * as they are written and migrate onto the arc_mru list.
 253  *
 254  * The ARC_l2c_only state is for buffers that are in the second
 255  * level ARC but no longer in any of the ARC_m* lists.  The second
 256  * level ARC itself may also contain buffers that are in any of
 257  * the ARC_m* states - meaning that a buffer can exist in two
 258  * places.  The reason for the ARC_l2c_only state is to keep the
 259  * buffer header in the hash table, so that reads that hit the
 260  * second level ARC benefit from these fast lookups.
 261  */
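
/*
 * In broad strokes (a sketch of the policy implemented by arc_access()
 * later in this file): a block read for the first time lands in ARC_mru;
 * a later hit promotes it to ARC_mfu; when its data is evicted only the
 * header is retained, on ARC_mru_ghost or ARC_mfu_ghost respectively; and
 * a subsequent hit on such a ghost header pulls the data back in under
 * ARC_mfu while nudging the MRU/MFU balance (arc_p) toward the list that
 * saw the ghost hit.
 */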
 262 
 263 typedef struct arc_state {
 264         /*
 265          * list of evictable buffers
 266          */
 267         multilist_t arcs_list[ARC_BUFC_NUMTYPES];
 268         /*
 269          * total amount of evictable data in this state
 270          */
 271         uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];
 272         /*
 273          * total amount of data in this state; this includes: evictable,
 274          * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
 275          */
 276         refcount_t arcs_size;
 277 } arc_state_t;
 278 
 279 /* The 6 states: */
 280 static arc_state_t ARC_anon;
 281 static arc_state_t ARC_mru;
 282 static arc_state_t ARC_mru_ghost;
 283 static arc_state_t ARC_mfu;
 284 static arc_state_t ARC_mfu_ghost;
 285 static arc_state_t ARC_l2c_only;
 286 
 287 typedef struct arc_stats {
 288         kstat_named_t arcstat_hits;
 289         kstat_named_t arcstat_misses;
 290         kstat_named_t arcstat_demand_data_hits;
 291         kstat_named_t arcstat_demand_data_misses;
 292         kstat_named_t arcstat_demand_metadata_hits;
 293         kstat_named_t arcstat_demand_metadata_misses;
 294         kstat_named_t arcstat_prefetch_data_hits;
 295         kstat_named_t arcstat_prefetch_data_misses;
 296         kstat_named_t arcstat_prefetch_metadata_hits;
 297         kstat_named_t arcstat_prefetch_metadata_misses;
 298         kstat_named_t arcstat_mru_hits;
 299         kstat_named_t arcstat_mru_ghost_hits;
 300         kstat_named_t arcstat_mfu_hits;
 301         kstat_named_t arcstat_mfu_ghost_hits;
 302         kstat_named_t arcstat_deleted;
 303         /*
 304          * Number of buffers that could not be evicted because the hash lock
 305          * was held by another thread.  The lock may not necessarily be held
 306          * by something using the same buffer, since hash locks are shared
 307          * by multiple buffers.
 308          */
 309         kstat_named_t arcstat_mutex_miss;
 310         /*
 311          * Number of buffers skipped because they have I/O in progress, are
 312          * indirect prefetch buffers that have not lived long enough, or are
 313          * not from the spa we're trying to evict from.
 314          */
 315         kstat_named_t arcstat_evict_skip;
 316         /*
 317          * Number of times arc_evict_state() was unable to evict enough
 318          * buffers to reach its target amount.
 319          */
 320         kstat_named_t arcstat_evict_not_enough;
 321         kstat_named_t arcstat_evict_l2_cached;
 322         kstat_named_t arcstat_evict_l2_eligible;
 323         kstat_named_t arcstat_evict_l2_ineligible;
 324         kstat_named_t arcstat_evict_l2_skip;
 325         kstat_named_t arcstat_hash_elements;
 326         kstat_named_t arcstat_hash_elements_max;
 327         kstat_named_t arcstat_hash_collisions;
 328         kstat_named_t arcstat_hash_chains;
 329         kstat_named_t arcstat_hash_chain_max;
 330         kstat_named_t arcstat_p;
 331         kstat_named_t arcstat_c;
 332         kstat_named_t arcstat_c_min;
 333         kstat_named_t arcstat_c_max;
 334         kstat_named_t arcstat_size;
 335         /*
 336          * Number of bytes consumed by internal ARC structures necessary
 337          * for tracking purposes; these structures are not actually
 338          * backed by ARC buffers. This includes arc_buf_hdr_t structures
 339          * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
 340          * caches), and arc_buf_t structures (allocated via arc_buf_t
 341          * cache).
 342          */
 343         kstat_named_t arcstat_hdr_size;
 344         /*
 345          * Number of bytes consumed by ARC buffers of type equal to
 346          * ARC_BUFC_DATA. This is generally consumed by buffers backing
 347          * on disk user data (e.g. plain file contents).
 348          */
 349         kstat_named_t arcstat_data_size;
 350         /*
 351          * Number of bytes consumed by ARC buffers of type equal to
 352          * ARC_BUFC_METADATA. This is generally consumed by buffers
 353          * backing on disk data that is used for internal ZFS
 354          * structures (e.g. ZAP, dnode, indirect blocks, etc).
 355          */
 356         kstat_named_t arcstat_metadata_size;
 357         /*
 358          * Number of bytes consumed by various buffers and structures
 359          * not actually backed with ARC buffers. This includes bonus
 360          * buffers (allocated directly via zio_buf_* functions),
 361          * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
 362          * cache), and dnode_t structures (allocated via dnode_t cache).
 363          */
 364         kstat_named_t arcstat_other_size;
 365         /*
 366          * Total number of bytes consumed by ARC buffers residing in the
 367          * arc_anon state. This includes *all* buffers in the arc_anon
 368          * state; e.g. data, metadata, evictable, and unevictable buffers
 369          * are all included in this value.
 370          */
 371         kstat_named_t arcstat_anon_size;
 372         /*
 373          * Number of bytes consumed by ARC buffers that meet the
 374          * following criteria: backing buffers of type ARC_BUFC_DATA,
 375          * residing in the arc_anon state, and are eligible for eviction
 376          * (e.g. have no outstanding holds on the buffer).
 377          */
 378         kstat_named_t arcstat_anon_evictable_data;
 379         /*
 380          * Number of bytes consumed by ARC buffers that meet the
 381          * following criteria: backing buffers of type ARC_BUFC_METADATA,
 382          * residing in the arc_anon state, and are eligible for eviction
 383          * (e.g. have no outstanding holds on the buffer).
 384          */
 385         kstat_named_t arcstat_anon_evictable_metadata;
 386         /*
 387          * Total number of bytes consumed by ARC buffers residing in the
 388          * arc_mru state. This includes *all* buffers in the arc_mru
 389          * state; e.g. data, metadata, evictable, and unevictable buffers
 390          * are all included in this value.
 391          */
 392         kstat_named_t arcstat_mru_size;
 393         /*
 394          * Number of bytes consumed by ARC buffers that meet the
 395          * following criteria: backing buffers of type ARC_BUFC_DATA,
 396          * residing in the arc_mru state, and are eligible for eviction
 397          * (e.g. have no outstanding holds on the buffer).
 398          */
 399         kstat_named_t arcstat_mru_evictable_data;
 400         /*
 401          * Number of bytes consumed by ARC buffers that meet the
 402          * following criteria: backing buffers of type ARC_BUFC_METADATA,
 403          * residing in the arc_mru state, and are eligible for eviction
 404          * (e.g. have no outstanding holds on the buffer).
 405          */
 406         kstat_named_t arcstat_mru_evictable_metadata;
 407         /*
 408          * Total number of bytes that *would have been* consumed by ARC
 409          * buffers in the arc_mru_ghost state. The key thing to note
 410          * here is that this size doesn't actually indicate RAM
 411          * consumption. The ghost lists only consist of headers and
 412          * don't actually have ARC buffers linked off of these headers.
 413          * Thus, *if* the headers had associated ARC buffers, these
 414          * buffers *would have* consumed this number of bytes.
 415          */
 416         kstat_named_t arcstat_mru_ghost_size;
 417         /*
 418          * Number of bytes that *would have been* consumed by ARC
 419          * buffers that are eligible for eviction, of type
 420          * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
 421          */
 422         kstat_named_t arcstat_mru_ghost_evictable_data;
 423         /*
 424          * Number of bytes that *would have been* consumed by ARC
 425          * buffers that are eligible for eviction, of type
 426          * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
 427          */
 428         kstat_named_t arcstat_mru_ghost_evictable_metadata;
 429         /*
 430          * Total number of bytes consumed by ARC buffers residing in the
 431          * arc_mfu state. This includes *all* buffers in the arc_mfu
 432          * state; e.g. data, metadata, evictable, and unevictable buffers
 433          * are all included in this value.
 434          */
 435         kstat_named_t arcstat_mfu_size;
 436         /*
 437          * Number of bytes consumed by ARC buffers that are eligible for
 438          * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
 439          * state.
 440          */
 441         kstat_named_t arcstat_mfu_evictable_data;
 442         /*
 443          * Number of bytes consumed by ARC buffers that are eligible for
 444          * eviction, of type ARC_BUFC_METADATA, and reside in the
 445          * arc_mfu state.
 446          */
 447         kstat_named_t arcstat_mfu_evictable_metadata;
 448         /*
 449          * Total number of bytes that *would have been* consumed by ARC
 450          * buffers in the arc_mfu_ghost state. See the comment above
 451          * arcstat_mru_ghost_size for more details.
 452          */
 453         kstat_named_t arcstat_mfu_ghost_size;
 454         /*
 455          * Number of bytes that *would have been* consumed by ARC
 456          * buffers that are eligible for eviction, of type
 457          * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
 458          */
 459         kstat_named_t arcstat_mfu_ghost_evictable_data;
 460         /*
 461          * Number of bytes that *would have been* consumed by ARC
 462          * buffers that are eligible for eviction, of type
 463          * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
 464          */
 465         kstat_named_t arcstat_mfu_ghost_evictable_metadata;
 466         kstat_named_t arcstat_l2_hits;
 467         kstat_named_t arcstat_l2_misses;
 468         kstat_named_t arcstat_l2_feeds;
 469         kstat_named_t arcstat_l2_rw_clash;
 470         kstat_named_t arcstat_l2_read_bytes;
 471         kstat_named_t arcstat_l2_write_bytes;
 472         kstat_named_t arcstat_l2_writes_sent;
 473         kstat_named_t arcstat_l2_writes_done;
 474         kstat_named_t arcstat_l2_writes_error;
 475         kstat_named_t arcstat_l2_writes_lock_retry;
 476         kstat_named_t arcstat_l2_evict_lock_retry;
 477         kstat_named_t arcstat_l2_evict_reading;
 478         kstat_named_t arcstat_l2_evict_l1cached;
 479         kstat_named_t arcstat_l2_free_on_write;
 480         kstat_named_t arcstat_l2_cdata_free_on_write;
 481         kstat_named_t arcstat_l2_abort_lowmem;
 482         kstat_named_t arcstat_l2_cksum_bad;
 483         kstat_named_t arcstat_l2_io_error;
 484         kstat_named_t arcstat_l2_size;
 485         kstat_named_t arcstat_l2_asize;
 486         kstat_named_t arcstat_l2_hdr_size;
 487         kstat_named_t arcstat_l2_compress_successes;
 488         kstat_named_t arcstat_l2_compress_zeros;
 489         kstat_named_t arcstat_l2_compress_failures;
 490         kstat_named_t arcstat_memory_throttle_count;
 491         kstat_named_t arcstat_duplicate_buffers;
 492         kstat_named_t arcstat_duplicate_buffers_size;
 493         kstat_named_t arcstat_duplicate_reads;
 494         kstat_named_t arcstat_meta_used;
 495         kstat_named_t arcstat_meta_limit;
 496         kstat_named_t arcstat_meta_max;
 497         kstat_named_t arcstat_meta_min;
 498 } arc_stats_t;
 499 
 500 static arc_stats_t arc_stats = {
 501         { "hits",                       KSTAT_DATA_UINT64 },
 502         { "misses",                     KSTAT_DATA_UINT64 },
 503         { "demand_data_hits",           KSTAT_DATA_UINT64 },
 504         { "demand_data_misses",         KSTAT_DATA_UINT64 },
 505         { "demand_metadata_hits",       KSTAT_DATA_UINT64 },
 506         { "demand_metadata_misses",     KSTAT_DATA_UINT64 },
 507         { "prefetch_data_hits",         KSTAT_DATA_UINT64 },
 508         { "prefetch_data_misses",       KSTAT_DATA_UINT64 },
 509         { "prefetch_metadata_hits",     KSTAT_DATA_UINT64 },
 510         { "prefetch_metadata_misses",   KSTAT_DATA_UINT64 },
 511         { "mru_hits",                   KSTAT_DATA_UINT64 },
 512         { "mru_ghost_hits",             KSTAT_DATA_UINT64 },
 513         { "mfu_hits",                   KSTAT_DATA_UINT64 },
 514         { "mfu_ghost_hits",             KSTAT_DATA_UINT64 },
 515         { "deleted",                    KSTAT_DATA_UINT64 },
 516         { "mutex_miss",                 KSTAT_DATA_UINT64 },
 517         { "evict_skip",                 KSTAT_DATA_UINT64 },
 518         { "evict_not_enough",           KSTAT_DATA_UINT64 },
 519         { "evict_l2_cached",            KSTAT_DATA_UINT64 },
 520         { "evict_l2_eligible",          KSTAT_DATA_UINT64 },
 521         { "evict_l2_ineligible",        KSTAT_DATA_UINT64 },
 522         { "evict_l2_skip",              KSTAT_DATA_UINT64 },
 523         { "hash_elements",              KSTAT_DATA_UINT64 },
 524         { "hash_elements_max",          KSTAT_DATA_UINT64 },
 525         { "hash_collisions",            KSTAT_DATA_UINT64 },
 526         { "hash_chains",                KSTAT_DATA_UINT64 },
 527         { "hash_chain_max",             KSTAT_DATA_UINT64 },
 528         { "p",                          KSTAT_DATA_UINT64 },
 529         { "c",                          KSTAT_DATA_UINT64 },
 530         { "c_min",                      KSTAT_DATA_UINT64 },
 531         { "c_max",                      KSTAT_DATA_UINT64 },
 532         { "size",                       KSTAT_DATA_UINT64 },
 533         { "hdr_size",                   KSTAT_DATA_UINT64 },
 534         { "data_size",                  KSTAT_DATA_UINT64 },
 535         { "metadata_size",              KSTAT_DATA_UINT64 },
 536         { "other_size",                 KSTAT_DATA_UINT64 },
 537         { "anon_size",                  KSTAT_DATA_UINT64 },
 538         { "anon_evictable_data",        KSTAT_DATA_UINT64 },
 539         { "anon_evictable_metadata",    KSTAT_DATA_UINT64 },
 540         { "mru_size",                   KSTAT_DATA_UINT64 },
 541         { "mru_evictable_data",         KSTAT_DATA_UINT64 },
 542         { "mru_evictable_metadata",     KSTAT_DATA_UINT64 },
 543         { "mru_ghost_size",             KSTAT_DATA_UINT64 },
 544         { "mru_ghost_evictable_data",   KSTAT_DATA_UINT64 },
 545         { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
 546         { "mfu_size",                   KSTAT_DATA_UINT64 },
 547         { "mfu_evictable_data",         KSTAT_DATA_UINT64 },
 548         { "mfu_evictable_metadata",     KSTAT_DATA_UINT64 },
 549         { "mfu_ghost_size",             KSTAT_DATA_UINT64 },
 550         { "mfu_ghost_evictable_data",   KSTAT_DATA_UINT64 },
 551         { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
 552         { "l2_hits",                    KSTAT_DATA_UINT64 },
 553         { "l2_misses",                  KSTAT_DATA_UINT64 },
 554         { "l2_feeds",                   KSTAT_DATA_UINT64 },
 555         { "l2_rw_clash",                KSTAT_DATA_UINT64 },
 556         { "l2_read_bytes",              KSTAT_DATA_UINT64 },
 557         { "l2_write_bytes",             KSTAT_DATA_UINT64 },
 558         { "l2_writes_sent",             KSTAT_DATA_UINT64 },
 559         { "l2_writes_done",             KSTAT_DATA_UINT64 },
 560         { "l2_writes_error",            KSTAT_DATA_UINT64 },
 561         { "l2_writes_lock_retry",       KSTAT_DATA_UINT64 },
 562         { "l2_evict_lock_retry",        KSTAT_DATA_UINT64 },
 563         { "l2_evict_reading",           KSTAT_DATA_UINT64 },
 564         { "l2_evict_l1cached",          KSTAT_DATA_UINT64 },
 565         { "l2_free_on_write",           KSTAT_DATA_UINT64 },
 566         { "l2_cdata_free_on_write",     KSTAT_DATA_UINT64 },
 567         { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
 568         { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
 569         { "l2_io_error",                KSTAT_DATA_UINT64 },
 570         { "l2_size",                    KSTAT_DATA_UINT64 },
 571         { "l2_asize",                   KSTAT_DATA_UINT64 },
 572         { "l2_hdr_size",                KSTAT_DATA_UINT64 },
 573         { "l2_compress_successes",      KSTAT_DATA_UINT64 },
 574         { "l2_compress_zeros",          KSTAT_DATA_UINT64 },
 575         { "l2_compress_failures",       KSTAT_DATA_UINT64 },
 576         { "memory_throttle_count",      KSTAT_DATA_UINT64 },
 577         { "duplicate_buffers",          KSTAT_DATA_UINT64 },
 578         { "duplicate_buffers_size",     KSTAT_DATA_UINT64 },
 579         { "duplicate_reads",            KSTAT_DATA_UINT64 },
 580         { "arc_meta_used",              KSTAT_DATA_UINT64 },
 581         { "arc_meta_limit",             KSTAT_DATA_UINT64 },
 582         { "arc_meta_max",               KSTAT_DATA_UINT64 },
 583         { "arc_meta_min",               KSTAT_DATA_UINT64 }
 584 };
 585 
 586 #define ARCSTAT(stat)   (arc_stats.stat.value.ui64)
 587 
 588 #define ARCSTAT_INCR(stat, val) \
 589         atomic_add_64(&arc_stats.stat.value.ui64, (val))
 590 
 591 #define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
 592 #define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)
 593 
 594 #define ARCSTAT_MAX(stat, val) {                                        \
 595         uint64_t m;                                                     \
 596         while ((val) > (m = arc_stats.stat.value.ui64) &&            \
 597             (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))     \
 598                 continue;                                               \
 599 }
 600 
 601 #define ARCSTAT_MAXSTAT(stat) \
 602         ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
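
/*
 * For example (a sketch; real call sites appear throughout this file),
 * ARCSTAT_BUMP(arcstat_hits) atomically increments the "hits" kstat, and
 * ARCSTAT_MAXSTAT(arcstat_hash_elements) records a new high-water mark in
 * "hash_elements_max" whenever the current element count exceeds it.
 */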
 603 
 604 /*
 605  * We define a macro to allow ARC hits/misses to be easily broken down by
 606  * two separate conditions, giving a total of four different subtypes for
 607  * each of hits and misses (so eight statistics total).
 608  */
 609 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
 610         if (cond1) {                                                    \
 611                 if (cond2) {                                            \
 612                         ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
 613                 } else {                                                \
 614                         ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
 615                 }                                                       \
 616         } else {                                                        \
 617                 if (cond2) {                                            \
 618                         ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
 619                 } else {                                                \
 620                         ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
 621                 }                                                       \
 622         }
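
/*
 * For example (a sketch modeled on the hit/miss accounting later in this
 * file):
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits, depending on the two conditions.
 */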
 623 
 624 kstat_t                 *arc_ksp;
 625 static arc_state_t      *arc_anon;
 626 static arc_state_t      *arc_mru;
 627 static arc_state_t      *arc_mru_ghost;
 628 static arc_state_t      *arc_mfu;
 629 static arc_state_t      *arc_mfu_ghost;
 630 static arc_state_t      *arc_l2c_only;
 631 
 632 /*
 633  * There are several ARC variables that are critical to export as kstats --
 634  * but we don't want to have to grovel around in the kstat whenever we wish to
 635  * manipulate them.  For these variables, we therefore define them to be in
 636  * terms of the statistic variable.  This assures that we are not introducing
 637  * the possibility of inconsistency by having shadow copies of the variables,
 638  * while still allowing the code to be readable.
 639  */
 640 #define arc_size        ARCSTAT(arcstat_size)   /* actual total arc size */
 641 #define arc_p           ARCSTAT(arcstat_p)      /* target size of MRU */
 642 #define arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
 643 #define arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
 644 #define arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
 645 #define arc_meta_limit  ARCSTAT(arcstat_meta_limit) /* max size for metadata */
 646 #define arc_meta_min    ARCSTAT(arcstat_meta_min) /* min size for metadata */
 647 #define arc_meta_used   ARCSTAT(arcstat_meta_used) /* size of metadata */
 648 #define arc_meta_max    ARCSTAT(arcstat_meta_max) /* max size of metadata */
 649 
 650 #define L2ARC_IS_VALID_COMPRESS(_c_) \
 651         ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
 652 
 653 static int              arc_no_grow;    /* Don't try to grow cache size */
 654 static uint64_t         arc_tempreserve;
 655 static uint64_t         arc_loaned_bytes;
 656 
 657 typedef struct arc_callback arc_callback_t;
 658 
 659 struct arc_callback {
 660         void                    *acb_private;
 661         arc_done_func_t         *acb_done;
 662         arc_buf_t               *acb_buf;
 663         zio_t                   *acb_zio_dummy;
 664         arc_callback_t          *acb_next;
 665 };
 666 
 667 typedef struct arc_write_callback arc_write_callback_t;
 668 
 669 struct arc_write_callback {
 670         void            *awcb_private;
 671         arc_done_func_t *awcb_ready;
 672         arc_done_func_t *awcb_physdone;
 673         arc_done_func_t *awcb_done;
 674         arc_buf_t       *awcb_buf;
 675 };
 676 
 677 /*
 678  * ARC buffers are separated into multiple structs as a memory saving measure:
 679  *   - Common fields struct, always defined, and embedded within it:
 680  *       - L2-only fields, always allocated but undefined when not in L2ARC
 681  *       - L1-only fields, only allocated when in L1ARC
 682  *
 683  *           Buffer in L1                     Buffer only in L2
 684  *    +------------------------+          +------------------------+
 685  *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
 686  *    |                        |          |                        |
 687  *    |                        |          |                        |
 688  *    |                        |          |                        |
 689  *    +------------------------+          +------------------------+
 690  *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
 691  *    | (undefined if L1-only) |          |                        |
 692  *    +------------------------+          +------------------------+
 693  *    | l1arc_buf_hdr_t        |
 694  *    |                        |
 695  *    |                        |
 696  *    |                        |
 697  *    |                        |
 698  *    +------------------------+
 699  *
 700  * Because it's possible for the L2ARC to become extremely large, we can wind
 701  * up eating a lot of memory in L2ARC buffer headers, so the size of a header
 702  * is minimized by only allocating the fields necessary for an L1-cached buffer
 703  * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
 704  * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
 705  * words in pointers. arc_hdr_realloc() is used to switch a header between
 706  * these two allocation states.
 707  */
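/*
 * For example (a sketch; the real call sites appear after arc_hdr_realloc()
 * is defined below), dropping the L1 portion of a header whose data remains
 * cached only in the L2ARC looks like:
 *
 *	hdr = arc_hdr_realloc(hdr, hdr_full_cache, hdr_l2only_cache);
 *
 * and the reverse transition, when an L2-only header is pulled back into
 * the L1 cache, passes the two kmem caches in the opposite order.
 */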
 708 typedef struct l1arc_buf_hdr {
 709         kmutex_t                b_freeze_lock;
 710 #ifdef ZFS_DEBUG
 711         /*
 712          * used for debugging with kmem_flags - by allocating and freeing
 713          * b_thawed when the buffer is thawed, we get a record of the stack
 714          * trace that thawed it.
 715          */
 716         void                    *b_thawed;
 717 #endif
 718 
 719         arc_buf_t               *b_buf;
 720         uint32_t                b_datacnt;
 721         /* for waiting on writes to complete */
 722         kcondvar_t              b_cv;
 723 
 724         /* protected by arc state mutex */
 725         arc_state_t             *b_state;
 726         multilist_node_t        b_arc_node;
 727 
 728         /* updated atomically */
 729         clock_t                 b_arc_access;
 730 
 731         /* self protecting */
 732         refcount_t              b_refcnt;
 733 
 734         arc_callback_t          *b_acb;
 735         /* temporary buffer holder for in-flight compressed data */
 736         void                    *b_tmp_cdata;
 737 } l1arc_buf_hdr_t;
 738 
 739 typedef struct l2arc_dev l2arc_dev_t;
 740 
 741 typedef struct l2arc_buf_hdr {
 742         /* protected by arc_buf_hdr mutex */
 743         l2arc_dev_t             *b_dev;         /* L2ARC device */
 744         uint64_t                b_daddr;        /* disk address, offset byte */
 745         /* real alloc'd buffer size depending on b_compress applied */
 746         int32_t                 b_asize;
 747         uint8_t                 b_compress;
 748 
 749         list_node_t             b_l2node;
 750 } l2arc_buf_hdr_t;
 751 
 752 struct arc_buf_hdr {
 753         /* protected by hash lock */
 754         dva_t                   b_dva;
 755         uint64_t                b_birth;
 756         /*
 757          * Even though this checksum is only set/verified when a buffer is in
 758          * the L1 cache, it needs to be in the set of common fields because it
 759          * must be preserved from the time before a buffer is written out to
 760          * L2ARC until after it is read back in.
 761          */
 762         zio_cksum_t             *b_freeze_cksum;
 763 
 764         arc_buf_hdr_t           *b_hash_next;
 765         arc_flags_t             b_flags;
 766 
 767         /* immutable */
 768         int32_t                 b_size;
 769         uint64_t                b_spa;
 770 
 771         /* L2ARC fields. Undefined when not in L2ARC. */
 772         l2arc_buf_hdr_t         b_l2hdr;
 773         /* L1ARC fields. Undefined when in l2arc_only state */
 774         l1arc_buf_hdr_t         b_l1hdr;
 775 };
 776 
 777 static arc_buf_t *arc_eviction_list;
 778 static arc_buf_hdr_t arc_eviction_hdr;
 779 
 780 #define GHOST_STATE(state)      \
 781         ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
 782         (state) == arc_l2c_only)
 783 
 784 #define HDR_IN_HASH_TABLE(hdr)  ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
 785 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
 786 #define HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
 787 #define HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_FLAG_PREFETCH)
 788 #define HDR_FREED_IN_READ(hdr)  ((hdr)->b_flags & ARC_FLAG_FREED_IN_READ)
 789 #define HDR_BUF_AVAILABLE(hdr)  ((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE)
 790 
 791 #define HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_FLAG_L2CACHE)
 792 #define HDR_L2COMPRESS(hdr)     ((hdr)->b_flags & ARC_FLAG_L2COMPRESS)
 793 #define HDR_L2_READING(hdr)     \
 794             (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&       \
 795             ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
 796 #define HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
 797 #define HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
 798 #define HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
 799 
 800 #define HDR_ISTYPE_METADATA(hdr)        \
 801             ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
 802 #define HDR_ISTYPE_DATA(hdr)    (!HDR_ISTYPE_METADATA(hdr))
 803 
 804 #define HDR_HAS_L1HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
 805 #define HDR_HAS_L2HDR(hdr)      ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
 806 
 807 /*
 808  * Other sizes
 809  */
 810 
 811 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
 812 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
 813 
 814 /*
 815  * Hash table routines
 816  */
 817 
 818 #define HT_LOCK_PAD     64
 819 
 820 struct ht_lock {
 821         kmutex_t        ht_lock;
 822 #ifdef _KERNEL
 823         unsigned char   pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
 824 #endif
 825 };
 826 
 827 #define BUF_LOCKS 256
 828 typedef struct buf_hash_table {
 829         uint64_t ht_mask;
 830         arc_buf_hdr_t **ht_table;
 831         struct ht_lock ht_locks[BUF_LOCKS];
 832 } buf_hash_table_t;
 833 
 834 static buf_hash_table_t buf_hash_table;
 835 
 836 #define BUF_HASH_INDEX(spa, dva, birth) \
 837         (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
 838 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
 839 #define BUF_HASH_LOCK(idx)      (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
 840 #define HDR_LOCK(hdr) \
 841         (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
 842 
 843 uint64_t zfs_crc64_table[256];
 844 
 845 /*
 846  * Level 2 ARC
 847  */
 848 
 849 #define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
 850 #define L2ARC_HEADROOM          2                       /* num of writes */
 851 /*
 852  * If we discover during ARC scan any buffers to be compressed, we boost
 853  * our headroom for the next scanning cycle by this percentage multiple.
 854  */
 855 #define L2ARC_HEADROOM_BOOST    200
 856 #define L2ARC_FEED_SECS         1               /* caching interval secs */
 857 #define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
 858 
 859 /*
 860  * Used to distinguish headers that are being processed by
 861  * l2arc_write_buffers(), but have yet to be assigned to an l2arc disk
 862  * address. This can happen when the header is added to the l2arc's list
 863  * of buffers to write in the first stage of l2arc_write_buffers(), but
 864  * has not yet been written out which happens in the second stage of
 865  * l2arc_write_buffers().
 866  */
 867 #define L2ARC_ADDR_UNSET        ((uint64_t)(-1))
 868 
 869 #define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
 870 #define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
 871 
 872 /* L2ARC Performance Tunables */
 873 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
 874 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
 875 uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
 876 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
 877 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
 878 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
 879 boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
 880 boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
 881 boolean_t l2arc_norw = B_TRUE;                  /* no reads during writes */
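
/*
 * Rough worked example of the defaults above (a sketch; l2arc_feed_thread()
 * and l2arc_write_buffers(), defined later, contain the authoritative
 * logic): roughly once every l2arc_feed_secs (1 second) the feed thread may
 * issue up to l2arc_write_max (8 MB) of writes, or l2arc_write_max +
 * l2arc_write_boost (16 MB) while the ARC is still warming up, and it scans
 * on the order of l2arc_headroom (2) times that amount from the tail of the
 * ARC lists looking for eligible buffers.
 */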
 882 
 883 /*
 884  * L2ARC Internals
 885  */
 886 struct l2arc_dev {
 887         vdev_t                  *l2ad_vdev;     /* vdev */
 888         spa_t                   *l2ad_spa;      /* spa */
 889         uint64_t                l2ad_hand;      /* next write location */
 890         uint64_t                l2ad_start;     /* first addr on device */
 891         uint64_t                l2ad_end;       /* last addr on device */
 892         boolean_t               l2ad_first;     /* first sweep through */
 893         boolean_t               l2ad_writing;   /* currently writing */
 894         kmutex_t                l2ad_mtx;       /* lock for buffer list */
 895         list_t                  l2ad_buflist;   /* buffer list */
 896         list_node_t             l2ad_node;      /* device list node */
 897         refcount_t              l2ad_alloc;     /* allocated bytes */
 898 };
 899 
 900 static list_t L2ARC_dev_list;                   /* device list */
 901 static list_t *l2arc_dev_list;                  /* device list pointer */
 902 static kmutex_t l2arc_dev_mtx;                  /* device list mutex */
 903 static l2arc_dev_t *l2arc_dev_last;             /* last device used */
 904 static list_t L2ARC_free_on_write;              /* free after write buf list */
 905 static list_t *l2arc_free_on_write;             /* free after write list ptr */
 906 static kmutex_t l2arc_free_on_write_mtx;        /* mutex for list */
 907 static uint64_t l2arc_ndev;                     /* number of devices */
 908 
 909 typedef struct l2arc_read_callback {
 910         arc_buf_t               *l2rcb_buf;             /* read buffer */
 911         spa_t                   *l2rcb_spa;             /* spa */
 912         blkptr_t                l2rcb_bp;               /* original blkptr */
 913         zbookmark_phys_t        l2rcb_zb;               /* original bookmark */
 914         int                     l2rcb_flags;            /* original flags */
 915         enum zio_compress       l2rcb_compress;         /* applied compress */
 916 } l2arc_read_callback_t;
 917 
 918 typedef struct l2arc_write_callback {
 919         l2arc_dev_t     *l2wcb_dev;             /* device info */
 920         arc_buf_hdr_t   *l2wcb_head;            /* head of write buflist */
 921 } l2arc_write_callback_t;
 922 
 923 typedef struct l2arc_data_free {
 924         /* protected by l2arc_free_on_write_mtx */
 925         void            *l2df_data;
 926         size_t          l2df_size;
 927         void            (*l2df_func)(void *, size_t);
 928         list_node_t     l2df_list_node;
 929 } l2arc_data_free_t;
 930 
 931 static kmutex_t l2arc_feed_thr_lock;
 932 static kcondvar_t l2arc_feed_thr_cv;
 933 static uint8_t l2arc_thread_exit;
 934 
 935 static void arc_get_data_buf(arc_buf_t *);
 936 static void arc_access(arc_buf_hdr_t *, kmutex_t *);
 937 static boolean_t arc_is_overflowing();
 938 static void arc_buf_watch(arc_buf_t *);
 939 
 940 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
 941 static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
 942 
 943 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
 944 static void l2arc_read_done(zio_t *);
 945 
 946 static boolean_t l2arc_compress_buf(arc_buf_hdr_t *);
 947 static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress);
 948 static void l2arc_release_cdata_buf(arc_buf_hdr_t *);
 949 
 950 static uint64_t
 951 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
 952 {
 953         uint8_t *vdva = (uint8_t *)dva;
 954         uint64_t crc = -1ULL;
 955         int i;
 956 
 957         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
 958 
 959         for (i = 0; i < sizeof (dva_t); i++)
 960                 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
 961 
 962         crc ^= (spa>>8) ^ birth;
 963 
 964         return (crc);
 965 }
 966 
 967 #define BUF_EMPTY(buf)                                          \
 968         ((buf)->b_dva.dva_word[0] == 0 &&                    \
 969         (buf)->b_dva.dva_word[1] == 0)
 970 
 971 #define BUF_EQUAL(spa, dva, birth, buf)                         \
 972         ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&       \
 973         ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&       \
 974         ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
 975 
 976 static void
 977 buf_discard_identity(arc_buf_hdr_t *hdr)
 978 {
 979         hdr->b_dva.dva_word[0] = 0;
 980         hdr->b_dva.dva_word[1] = 0;
 981         hdr->b_birth = 0;
 982 }
 983 
 984 static arc_buf_hdr_t *
 985 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
 986 {
 987         const dva_t *dva = BP_IDENTITY(bp);
 988         uint64_t birth = BP_PHYSICAL_BIRTH(bp);
 989         uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 990         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 991         arc_buf_hdr_t *hdr;
 992 
 993         mutex_enter(hash_lock);
 994         for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
 995             hdr = hdr->b_hash_next) {
 996                 if (BUF_EQUAL(spa, dva, birth, hdr)) {
 997                         *lockp = hash_lock;
 998                         return (hdr);
 999                 }
1000         }
1001         mutex_exit(hash_lock);
1002         *lockp = NULL;
1003         return (NULL);
1004 }
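
/*
 * Typical caller pattern (a sketch; arc_read() and friends later in this
 * file are the real consumers).  The returned header is only valid while
 * the hash lock handed back through lockp remains held:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... examine or update hdr ...
 *		mutex_exit(hash_lock);
 *	}
 */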
1005 
1006 /*
1007  * Insert an entry into the hash table.  If there is already an element
1008  * equal to elem in the hash table, then the already existing element
1009  * will be returned and the new element will not be inserted.
1010  * Otherwise returns NULL.
1011  * If lockp == NULL, the caller is assumed to already hold the hash lock.
1012  */
1013 static arc_buf_hdr_t *
1014 buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
1015 {
1016         uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1017         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1018         arc_buf_hdr_t *fhdr;
1019         uint32_t i;
1020 
1021         ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
1022         ASSERT(hdr->b_birth != 0);
1023         ASSERT(!HDR_IN_HASH_TABLE(hdr));
1024 
1025         if (lockp != NULL) {
1026                 *lockp = hash_lock;
1027                 mutex_enter(hash_lock);
1028         } else {
1029                 ASSERT(MUTEX_HELD(hash_lock));
1030         }
1031 
1032         for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
1033             fhdr = fhdr->b_hash_next, i++) {
1034                 if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
1035                         return (fhdr);
1036         }
1037 
1038         hdr->b_hash_next = buf_hash_table.ht_table[idx];
1039         buf_hash_table.ht_table[idx] = hdr;
1040         hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
1041 
1042         /* collect some hash table performance data */
1043         if (i > 0) {
1044                 ARCSTAT_BUMP(arcstat_hash_collisions);
1045                 if (i == 1)
1046                         ARCSTAT_BUMP(arcstat_hash_chains);
1047 
1048                 ARCSTAT_MAX(arcstat_hash_chain_max, i);
1049         }
1050 
1051         ARCSTAT_BUMP(arcstat_hash_elements);
1052         ARCSTAT_MAXSTAT(arcstat_hash_elements);
1053 
1054         return (NULL);
1055 }
1056 
1057 static void
1058 buf_hash_remove(arc_buf_hdr_t *hdr)
1059 {
1060         arc_buf_hdr_t *fhdr, **hdrp;
1061         uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1062 
1063         ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
1064         ASSERT(HDR_IN_HASH_TABLE(hdr));
1065 
1066         hdrp = &buf_hash_table.ht_table[idx];
1067         while ((fhdr = *hdrp) != hdr) {
1068                 ASSERT(fhdr != NULL);
1069                 hdrp = &fhdr->b_hash_next;
1070         }
1071         *hdrp = hdr->b_hash_next;
1072         hdr->b_hash_next = NULL;
1073         hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE;
1074 
1075         /* collect some hash table performance data */
1076         ARCSTAT_BUMPDOWN(arcstat_hash_elements);
1077 
1078         if (buf_hash_table.ht_table[idx] &&
1079             buf_hash_table.ht_table[idx]->b_hash_next == NULL)
1080                 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
1081 }
1082 
1083 /*
1084  * Global data structures and functions for the buf kmem cache.
1085  */
1086 static kmem_cache_t *hdr_full_cache;
1087 static kmem_cache_t *hdr_l2only_cache;
1088 static kmem_cache_t *buf_cache;
1089 
1090 static void
1091 buf_fini(void)
1092 {
1093         int i;
1094 
1095         kmem_free(buf_hash_table.ht_table,
1096             (buf_hash_table.ht_mask + 1) * sizeof (void *));
1097         for (i = 0; i < BUF_LOCKS; i++)
1098                 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
1099         kmem_cache_destroy(hdr_full_cache);
1100         kmem_cache_destroy(hdr_l2only_cache);
1101         kmem_cache_destroy(buf_cache);
1102 }
1103 
1104 /*
1105  * Constructor callback - called when the cache is empty
1106  * and a new buf is requested.
1107  */
1108 /* ARGSUSED */
1109 static int
1110 hdr_full_cons(void *vbuf, void *unused, int kmflag)
1111 {
1112         arc_buf_hdr_t *hdr = vbuf;
1113 
1114         bzero(hdr, HDR_FULL_SIZE);
1115         cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
1116         refcount_create(&hdr->b_l1hdr.b_refcnt);
1117         mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
1118         multilist_link_init(&hdr->b_l1hdr.b_arc_node);
1119         arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1120 
1121         return (0);
1122 }
1123 
1124 /* ARGSUSED */
1125 static int
1126 hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
1127 {
1128         arc_buf_hdr_t *hdr = vbuf;
1129 
1130         bzero(hdr, HDR_L2ONLY_SIZE);
1131         arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1132 
1133         return (0);
1134 }
1135 
1136 /* ARGSUSED */
1137 static int
1138 buf_cons(void *vbuf, void *unused, int kmflag)
1139 {
1140         arc_buf_t *buf = vbuf;
1141 
1142         bzero(buf, sizeof (arc_buf_t));
1143         mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1144         arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1145 
1146         return (0);
1147 }
1148 
1149 /*
1150  * Destructor callback - called when a cached buf is
1151  * no longer required.
1152  */
1153 /* ARGSUSED */
1154 static void
1155 hdr_full_dest(void *vbuf, void *unused)
1156 {
1157         arc_buf_hdr_t *hdr = vbuf;
1158 
1159         ASSERT(BUF_EMPTY(hdr));
1160         cv_destroy(&hdr->b_l1hdr.b_cv);
1161         refcount_destroy(&hdr->b_l1hdr.b_refcnt);
1162         mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
1163         ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
1164         arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1165 }
1166 
1167 /* ARGSUSED */
1168 static void
1169 hdr_l2only_dest(void *vbuf, void *unused)
1170 {
1171         arc_buf_hdr_t *hdr = vbuf;
1172 
1173         ASSERT(BUF_EMPTY(hdr));
1174         arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1175 }
1176 
1177 /* ARGSUSED */
1178 static void
1179 buf_dest(void *vbuf, void *unused)
1180 {
1181         arc_buf_t *buf = vbuf;
1182 
1183         mutex_destroy(&buf->b_evict_lock);
1184         arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1185 }
1186 
1187 /*
1188  * Reclaim callback -- invoked when memory is low.
1189  */
1190 /* ARGSUSED */
1191 static void
1192 hdr_recl(void *unused)
1193 {
1194         dprintf("hdr_recl called\n");
1195         /*
1196          * umem calls the reclaim func when we destroy the buf cache,
1197          * which is after we do arc_fini().
1198          */
1199         if (!arc_dead)
1200                 cv_signal(&arc_reclaim_thread_cv);
1201 }
1202 
1203 static void
1204 buf_init(void)
1205 {
1206         uint64_t *ct;
1207         uint64_t hsize = 1ULL << 12;
1208         int i, j;
1209 
1210         /*
1211          * The hash table is big enough to fill all of physical memory
1212          * with an average block size of zfs_arc_average_blocksize (default 8K).
1213          * By default, the table will take up
1214          * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
1215          */
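        /*
         * Worked example of the sizing above: with 8 GB of physical memory,
         * 8-byte pointers, and the default 8K average block size, the loop
         * below settles on the smallest power of two >= 8 GB / 8 KB, i.e.
         * 2^20 buckets, or 8 MB worth of ht_table pointers.
         */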
1216         while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
1217                 hsize <<= 1;
1218 retry:
1219         buf_hash_table.ht_mask = hsize - 1;
1220         buf_hash_table.ht_table =
1221             kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1222         if (buf_hash_table.ht_table == NULL) {
1223                 ASSERT(hsize > (1ULL << 8));
1224                 hsize >>= 1;
1225                 goto retry;
1226         }
1227 
1228         hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
1229             0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
1230         hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
1231             HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
1232             NULL, NULL, 0);
1233         buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1234             0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1235 
1236         for (i = 0; i < 256; i++)
1237                 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1238                         *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1239 
1240         for (i = 0; i < BUF_LOCKS; i++) {
1241                 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1242                     NULL, MUTEX_DEFAULT, NULL);
1243         }
1244 }
1245 
1246 /*
1247  * Transition between the two allocation states for the arc_buf_hdr struct.
1248  * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
1249  * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
1250  * version is used when a cache buffer is only in the L2ARC in order to reduce
1251  * memory usage.
1252  */
1253 static arc_buf_hdr_t *
1254 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
1255 {
1256         ASSERT(HDR_HAS_L2HDR(hdr));
1257 
1258         arc_buf_hdr_t *nhdr;
1259         l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
1260 
1261         ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
1262             (old == hdr_l2only_cache && new == hdr_full_cache));
1263 
1264         nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
1265 
1266         ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
1267         buf_hash_remove(hdr);
1268 
1269         bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
1270 
1271         if (new == hdr_full_cache) {
1272                 nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;
1273                 /*
1274                  * arc_access and arc_change_state need to be aware that a
1275                  * header has just come out of L2ARC, so we set its state to
1276                  * l2c_only even though it's about to change.
1277                  */
1278                 nhdr->b_l1hdr.b_state = arc_l2c_only;
1279 
1280                 /* Verify previous threads set b_tmp_cdata to NULL before freeing */
1281                 ASSERT3P(nhdr->b_l1hdr.b_tmp_cdata, ==, NULL);
1282         } else {
1283                 ASSERT(hdr->b_l1hdr.b_buf == NULL);
1284                 ASSERT0(hdr->b_l1hdr.b_datacnt);
1285 
1286                 /*
1287                  * If we've reached here, we must have been called from
1288                  * arc_evict_hdr(); as such, we should have already been
1289                  * removed from any ghost list we were previously on
1290                  * (which protects us from racing with arc_evict_state),
1291                  * thus no locking is needed during this check.
1292                  */
1293                 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
1294 
1295                 /*
1296                  * A buffer must not be moved into the arc_l2c_only
1297                  * state if it's not finished being written out to the
1298                  * l2arc device. Otherwise, the b_l1hdr.b_tmp_cdata field
1299                  * might still be accessed even though it has been removed.
1300                  */
1301                 VERIFY(!HDR_L2_WRITING(hdr));
1302                 VERIFY3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
1303 
1304 #ifdef ZFS_DEBUG
1305                 if (hdr->b_l1hdr.b_thawed != NULL)
1306                         kmem_free(hdr->b_l1hdr.b_thawed, 1);
1307                 hdr->b_l1hdr.b_thawed = NULL;
1308 #endif
1309 
1310                 nhdr->b_flags &= ~ARC_FLAG_HAS_L1HDR;
1311         }
1312         /*
1313          * The header has been reallocated so we need to re-insert it into any
1314          * lists it was on.
1315          */
1316         (void) buf_hash_insert(nhdr, NULL);
1317 
1318         ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
1319 
1320         mutex_enter(&dev->l2ad_mtx);
1321 
1322         /*
1323          * We must place the realloc'ed header back into the list at
1324          * the same spot. Otherwise, if it's placed earlier in the list,
1325          * l2arc_write_buffers() could find it during the function's
1326          * write phase, and try to write it out to the l2arc.
1327          */
1328         list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
1329         list_remove(&dev->l2ad_buflist, hdr);
1330 
1331         mutex_exit(&dev->l2ad_mtx);
1332 
1333         /*
1334          * Since we're using the pointer address as the tag when
1335          * incrementing and decrementing the l2ad_alloc refcount, we
1336          * must remove the old pointer (that we're about to destroy) and
1337          * add the new pointer to the refcount. Otherwise we'd remove
1338          * the wrong pointer address when calling arc_hdr_destroy() later.
1339          */
1340 
1341         (void) refcount_remove_many(&dev->l2ad_alloc,
1342             hdr->b_l2hdr.b_asize, hdr);
1343 
1344         (void) refcount_add_many(&dev->l2ad_alloc,
1345             nhdr->b_l2hdr.b_asize, nhdr);
1346 
1347         buf_discard_identity(hdr);
1348         hdr->b_freeze_cksum = NULL;
1349         kmem_cache_free(old, hdr);
1350 
1351         return (nhdr);
1352 }
1353 
1354 
1355 #define ARC_MINTIME     (hz>>4) /* 62 ms */
1356 
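     /*
      * When ZFS_DEBUG_MODIFY is set in zfs_flags, frozen buffers carry a
      * fletcher-2 checksum of their contents in b_freeze_cksum.
      * arc_cksum_verify() recomputes the checksum and panics if a buffer
      * was modified while it was supposed to be immutable.
      */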
1357 static void
1358 arc_cksum_verify(arc_buf_t *buf)
1359 {
1360         zio_cksum_t zc;
1361 
1362         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1363                 return;
1364 
1365         mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1366         if (buf->b_hdr->b_freeze_cksum == NULL || HDR_IO_ERROR(buf->b_hdr)) {
1367                 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1368                 return;
1369         }
1370         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1371         if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1372                 panic("buffer modified while frozen!");
1373         mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1374 }
1375 
1376 static int
1377 arc_cksum_equal(arc_buf_t *buf)
1378 {
1379         zio_cksum_t zc;
1380         int equal;
1381 
1382         mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1383         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1384         equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1385         mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1386 
1387         return (equal);
1388 }
1389 
1390 static void
1391 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1392 {
1393         if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1394                 return;
1395 
1396         mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1397         if (buf->b_hdr->b_freeze_cksum != NULL) {
1398                 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1399                 return;
1400         }
1401         buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1402         fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1403             buf->b_hdr->b_freeze_cksum);
1404         mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1405         arc_buf_watch(buf);
1406 }
1407 
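     /*
      * In userland builds, when arc_watch is set, writes to buffers that
      * should be immutable are trapped using /proc PCWATCH watchpoints:
      * arc_buf_watch() arms a WA_WRITE watchpoint covering the buffer's
      * data and arc_buf_unwatch() clears it again (pr_wflags of 0).
      */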
1408 #ifndef _KERNEL
1409 typedef struct procctl {
1410         long cmd;
1411         prwatch_t prwatch;
1412 } procctl_t;
1413 #endif
1414 
1415 /* ARGSUSED */
1416 static void
1417 arc_buf_unwatch(arc_buf_t *buf)
1418 {
1419 #ifndef _KERNEL
1420         if (arc_watch) {
1421                 int result;
1422                 procctl_t ctl;
1423                 ctl.cmd = PCWATCH;
1424                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1425                 ctl.prwatch.pr_size = 0;
1426                 ctl.prwatch.pr_wflags = 0;
1427                 result = write(arc_procfd, &ctl, sizeof (ctl));
1428                 ASSERT3U(result, ==, sizeof (ctl));
1429         }
1430 #endif
1431 }
1432 
1433 /* ARGSUSED */
1434 static void
1435 arc_buf_watch(arc_buf_t *buf)
1436 {
1437 #ifndef _KERNEL
1438         if (arc_watch) {
1439                 int result;
1440                 procctl_t ctl;
1441                 ctl.cmd = PCWATCH;
1442                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1443                 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1444                 ctl.prwatch.pr_wflags = WA_WRITE;
1445                 result = write(arc_procfd, &ctl, sizeof (ctl));
1446                 ASSERT3U(result, ==, sizeof (ctl));
1447         }
1448 #endif
1449 }
1450 
1451 static arc_buf_contents_t
1452 arc_buf_type(arc_buf_hdr_t *hdr)
1453 {
1454         if (HDR_ISTYPE_METADATA(hdr)) {
1455                 return (ARC_BUFC_METADATA);
1456         } else {
1457                 return (ARC_BUFC_DATA);
1458         }
1459 }
1460 
1461 static uint32_t
1462 arc_bufc_to_flags(arc_buf_contents_t type)
1463 {
1464         switch (type) {
1465         case ARC_BUFC_DATA:
1466                 /* metadata field is 0 if buffer contains normal data */
1467                 return (0);
1468         case ARC_BUFC_METADATA:
1469                 return (ARC_FLAG_BUFC_METADATA);
1470         default:
1471                 break;
1472         }
1473         panic("undefined ARC buffer type!");
1474         return ((uint32_t)-1);
1475 }
1476 
1477 void
1478 arc_buf_thaw(arc_buf_t *buf)
1479 {
1480         if (zfs_flags & ZFS_DEBUG_MODIFY) {
1481                 if (buf->b_hdr->b_l1hdr.b_state != arc_anon)
1482                         panic("modifying non-anon buffer!");
1483                 if (HDR_IO_IN_PROGRESS(buf->b_hdr))
1484                         panic("modifying buffer while i/o in progress!");
1485                 arc_cksum_verify(buf);
1486         }
1487 
1488         mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1489         if (buf->b_hdr->b_freeze_cksum != NULL) {
1490                 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1491                 buf->b_hdr->b_freeze_cksum = NULL;
1492         }
1493 
1494 #ifdef ZFS_DEBUG
1495         if (zfs_flags & ZFS_DEBUG_MODIFY) {
1496                 if (buf->b_hdr->b_l1hdr.b_thawed != NULL)
1497                         kmem_free(buf->b_hdr->b_l1hdr.b_thawed, 1);
1498                 buf->b_hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP);
1499         }
1500 #endif
1501 
1502         mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1503 
1504         arc_buf_unwatch(buf);
1505 }
1506 
1507 void
1508 arc_buf_freeze(arc_buf_t *buf)
1509 {
1510         kmutex_t *hash_lock;
1511 
1512         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1513                 return;
1514 
1515         hash_lock = HDR_LOCK(buf->b_hdr);
1516         mutex_enter(hash_lock);
1517 
1518         ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1519             buf->b_hdr->b_l1hdr.b_state == arc_anon);
1520         arc_cksum_compute(buf, B_FALSE);
1521         mutex_exit(hash_lock);
1522 
1523 }
1524 
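     /*
      * Take a reference on 'hdr' under its hash lock.  If this is the
      * first reference and the header is in a non-anonymous state, the
      * header becomes un-evictable: remove it from its state's eviction
      * list, adjust the evictable-size accounting, and clear the
      * prefetch flag.
      */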
1525 static void
1526 add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
1527 {
1528         ASSERT(HDR_HAS_L1HDR(hdr));
1529         ASSERT(MUTEX_HELD(hash_lock));
1530         arc_state_t *state = hdr->b_l1hdr.b_state;
1531 
1532         if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
1533             (state != arc_anon)) {
1534                 /* We don't use the L2-only state list. */
1535                 if (state != arc_l2c_only) {
1536                         arc_buf_contents_t type = arc_buf_type(hdr);
1537                         uint64_t delta = hdr->b_size * hdr->b_l1hdr.b_datacnt;
1538                         multilist_t *list = &state->arcs_list[type];
1539                         uint64_t *size = &state->arcs_lsize[type];
1540 
1541                         multilist_remove(list, hdr);
1542 
1543                         if (GHOST_STATE(state)) {
1544                                 ASSERT0(hdr->b_l1hdr.b_datacnt);
1545                                 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
1546                                 delta = hdr->b_size;
1547                         }
1548                         ASSERT(delta > 0);
1549                         ASSERT3U(*size, >=, delta);
1550                         atomic_add_64(size, -delta);
1551                 }
1552                 /* remove the prefetch flag if we get a reference */
1553                 hdr->b_flags &= ~ARC_FLAG_PREFETCH;
1554         }
1555 }
1556 
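     /*
      * Release a reference on 'hdr'.  If it was the last reference and
      * the header is in a cacheable (non-anonymous) state, the header
      * becomes evictable again: insert it into its state's eviction list
      * and add its space back to the evictable-size accounting.  Returns
      * the remaining reference count.
      */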
1557 static int
1558 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
1559 {
1560         int cnt;
1561         arc_state_t *state = hdr->b_l1hdr.b_state;
1562 
1563         ASSERT(HDR_HAS_L1HDR(hdr));
1564         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1565         ASSERT(!GHOST_STATE(state));
1566 
1567         /*
1568          * arc_l2c_only counts as a ghost state so we don't need to explicitly
1569          * check to prevent usage of the arc_l2c_only list.
1570          */
1571         if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
1572             (state != arc_anon)) {
1573                 arc_buf_contents_t type = arc_buf_type(hdr);
1574                 multilist_t *list = &state->arcs_list[type];
1575                 uint64_t *size = &state->arcs_lsize[type];
1576 
1577                 multilist_insert(list, hdr);
1578 
1579                 ASSERT(hdr->b_l1hdr.b_datacnt > 0);
1580                 atomic_add_64(size, hdr->b_size *
1581                     hdr->b_l1hdr.b_datacnt);
1582         }
1583         return (cnt);
1584 }
1585 
1586 /*
1587  * Move the supplied buffer to the indicated state. The hash lock
1588  * for the buffer must be held by the caller.
1589  */
1590 static void
1591 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
1592     kmutex_t *hash_lock)
1593 {
1594         arc_state_t *old_state;
1595         int64_t refcnt;
1596         uint32_t datacnt;
1597         uint64_t from_delta, to_delta;
1598         arc_buf_contents_t buftype = arc_buf_type(hdr);
1599 
1600         /*
1601          * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
1602          * in arc_read() when bringing a buffer out of the L2ARC.  However, the
1603          * L1 hdr doesn't always exist when we change state to arc_anon before
1604          * destroying a header, in which case reallocating to add the L1 hdr is
1605          * pointless.
1606          */
1607         if (HDR_HAS_L1HDR(hdr)) {
1608                 old_state = hdr->b_l1hdr.b_state;
1609                 refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
1610                 datacnt = hdr->b_l1hdr.b_datacnt;
1611         } else {
1612                 old_state = arc_l2c_only;
1613                 refcnt = 0;
1614                 datacnt = 0;
1615         }
1616 
1617         ASSERT(MUTEX_HELD(hash_lock));
1618         ASSERT3P(new_state, !=, old_state);
1619         ASSERT(refcnt == 0 || datacnt > 0);
1620         ASSERT(!GHOST_STATE(new_state) || datacnt == 0);
1621         ASSERT(old_state != arc_anon || datacnt <= 1);
1622 
1623         from_delta = to_delta = datacnt * hdr->b_size;
1624 
1625         /*
1626          * If this buffer is evictable, transfer it from the
1627          * old state list to the new state list.
1628          */
1629         if (refcnt == 0) {
1630                 if (old_state != arc_anon && old_state != arc_l2c_only) {
1631                         uint64_t *size = &old_state->arcs_lsize[buftype];
1632 
1633                         ASSERT(HDR_HAS_L1HDR(hdr));
1634                         multilist_remove(&old_state->arcs_list[buftype], hdr);
1635 
1636                         /*
1637                          * If prefetching out of the ghost cache,
1638                          * we will have a non-zero datacnt.
1639                          */
1640                         if (GHOST_STATE(old_state) && datacnt == 0) {
1641                                 /* ghost elements have a ghost size */
1642                                 ASSERT(hdr->b_l1hdr.b_buf == NULL);
1643                                 from_delta = hdr->b_size;
1644                         }
1645                         ASSERT3U(*size, >=, from_delta);
1646                         atomic_add_64(size, -from_delta);
1647                 }
1648                 if (new_state != arc_anon && new_state != arc_l2c_only) {
1649                         uint64_t *size = &new_state->arcs_lsize[buftype];
1650 
1651                         /*
1652                          * An L1 header always exists here, since if we're
1653                          * moving to some L1-cached state (i.e. not l2c_only or
1654                          * anonymous), we realloc the header to add an L1hdr
1655                          * beforehand.
1656                          */
1657                         ASSERT(HDR_HAS_L1HDR(hdr));
1658                         multilist_insert(&new_state->arcs_list[buftype], hdr);
1659 
1660                         /* ghost elements have a ghost size */
1661                         if (GHOST_STATE(new_state)) {
1662                                 ASSERT0(datacnt);
1663                                 ASSERT(hdr->b_l1hdr.b_buf == NULL);
1664                                 to_delta = hdr->b_size;
1665                         }
1666                         atomic_add_64(size, to_delta);
1667                 }
1668         }
1669 
1670         ASSERT(!BUF_EMPTY(hdr));
1671         if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
1672                 buf_hash_remove(hdr);
1673 
1674         /* adjust state sizes (ignore arc_l2c_only) */
1675 
1676         if (to_delta && new_state != arc_l2c_only) {
1677                 ASSERT(HDR_HAS_L1HDR(hdr));
1678                 if (GHOST_STATE(new_state)) {
1679                         ASSERT0(datacnt);
1680 
1681                         /*
1682                          * When moving a header to a ghost state, we first
1683                          * remove all arc buffers. Thus, we'll have a
1684                          * datacnt of zero, and no arc buffer to use for
1685                          * the reference. As a result, we use the arc
1686                          * header pointer for the reference.
1687                          */
1688                         (void) refcount_add_many(&new_state->arcs_size,
1689                             hdr->b_size, hdr);
1690                 } else {
1691                         ASSERT3U(datacnt, !=, 0);
1692 
1693                         /*
1694                          * Each individual buffer holds a unique reference,
1695                          * thus we must remove each of these references one
1696                          * at a time.
1697                          */
1698                         for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
1699                             buf = buf->b_next) {
1700                                 (void) refcount_add_many(&new_state->arcs_size,
1701                                     hdr->b_size, buf);
1702                         }
1703                 }
1704         }
1705 
1706         if (from_delta && old_state != arc_l2c_only) {
1707                 ASSERT(HDR_HAS_L1HDR(hdr));
1708                 if (GHOST_STATE(old_state)) {
1709                         /*
1710                          * When moving a header off of a ghost state,
1711                          * there's the possibility for datacnt to be
1712                          * non-zero. This is because we first add the
1713                          * arc buffer to the header prior to changing
1714                          * the header's state. Since we used the header
1715                          * for the reference when putting the header on
1716                          * the ghost state, we must balance that and use
1717                          * the header when removing it from the ghost state
1718                          * (even though datacnt is non-zero).
1719                          */
1720 
1721                         IMPLY(datacnt == 0, new_state == arc_anon ||
1722                             new_state == arc_l2c_only);
1723 
1724                         (void) refcount_remove_many(&old_state->arcs_size,
1725                             hdr->b_size, hdr);
1726                 } else {
1727                         ASSERT3P(datacnt, !=, 0);
1728 
1729                         /*
1730                          * Each individual buffer holds a unique reference,
1731                          * thus we must remove each of these references one
1732                          * at a time.
1733                          */
1734                         for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
1735                             buf = buf->b_next) {
1736                                 (void) refcount_remove_many(
1737                                     &old_state->arcs_size, hdr->b_size, buf);
1738                         }
1739                 }
1740         }
1741 
1742         if (HDR_HAS_L1HDR(hdr))
1743                 hdr->b_l1hdr.b_state = new_state;
1744 
1745         /*
1746          * L2-only headers should never be on the arc_l2c_only state lists,
1747          * since they don't have L1 headers allocated.
1748          */
1749         ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
1750             multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
1751 }
1752 
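     /*
      * Account for 'space' bytes of the given type being consumed by the
      * ARC.  Every type other than ARC_SPACE_DATA is also rolled up into
      * the aggregate arcstat_meta_used counter, and arc_size grows by
      * 'space'.
      */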
1753 void
1754 arc_space_consume(uint64_t space, arc_space_type_t type)
1755 {
1756         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1757 
1758         switch (type) {
1759         case ARC_SPACE_DATA:
1760                 ARCSTAT_INCR(arcstat_data_size, space);
1761                 break;
1762         case ARC_SPACE_META:
1763                 ARCSTAT_INCR(arcstat_metadata_size, space);
1764                 break;
1765         case ARC_SPACE_OTHER:
1766                 ARCSTAT_INCR(arcstat_other_size, space);
1767                 break;
1768         case ARC_SPACE_HDRS:
1769                 ARCSTAT_INCR(arcstat_hdr_size, space);
1770                 break;
1771         case ARC_SPACE_L2HDRS:
1772                 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1773                 break;
1774         }
1775 
1776         if (type != ARC_SPACE_DATA)
1777                 ARCSTAT_INCR(arcstat_meta_used, space);
1778 
1779         atomic_add_64(&arc_size, space);
1780 }
1781 
1782 void
1783 arc_space_return(uint64_t space, arc_space_type_t type)
1784 {
1785         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1786 
1787         switch (type) {
1788         case ARC_SPACE_DATA:
1789                 ARCSTAT_INCR(arcstat_data_size, -space);
1790                 break;
1791         case ARC_SPACE_META:
1792                 ARCSTAT_INCR(arcstat_metadata_size, -space);
1793                 break;
1794         case ARC_SPACE_OTHER:
1795                 ARCSTAT_INCR(arcstat_other_size, -space);
1796                 break;
1797         case ARC_SPACE_HDRS:
1798                 ARCSTAT_INCR(arcstat_hdr_size, -space);
1799                 break;
1800         case ARC_SPACE_L2HDRS:
1801                 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1802                 break;
1803         }
1804 
1805         if (type != ARC_SPACE_DATA) {
1806                 ASSERT(arc_meta_used >= space);
1807                 if (arc_meta_max < arc_meta_used)
1808                         arc_meta_max = arc_meta_used;
1809                 ARCSTAT_INCR(arcstat_meta_used, -space);
1810         }
1811 
1812         ASSERT(arc_size >= space);
1813         atomic_add_64(&arc_size, -space);
1814 }
1815 
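     /*
      * Allocate an anonymous ARC buffer of 'size' bytes on behalf of
      * 'tag'.  The new header starts out in the arc_anon state with a
      * single data buffer and a single reference held by 'tag'.
      */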
1816 arc_buf_t *
1817 arc_buf_alloc(spa_t *spa, int32_t size, void *tag, arc_buf_contents_t type)
1818 {
1819         arc_buf_hdr_t *hdr;
1820         arc_buf_t *buf;
1821 
1822         ASSERT3U(size, >, 0);
1823         hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
1824         ASSERT(BUF_EMPTY(hdr));
1825         ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
1826         hdr->b_size = size;
1827         hdr->b_spa = spa_load_guid(spa);
1828 
1829         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1830         buf->b_hdr = hdr;
1831         buf->b_data = NULL;
1832         buf->b_efunc = NULL;
1833         buf->b_private = NULL;
1834         buf->b_next = NULL;
1835 
1836         hdr->b_flags = arc_bufc_to_flags(type);
1837         hdr->b_flags |= ARC_FLAG_HAS_L1HDR;
1838 
1839         hdr->b_l1hdr.b_buf = buf;
1840         hdr->b_l1hdr.b_state = arc_anon;
1841         hdr->b_l1hdr.b_arc_access = 0;
1842         hdr->b_l1hdr.b_datacnt = 1;
1843         hdr->b_l1hdr.b_tmp_cdata = NULL;
1844 
1845         arc_get_data_buf(buf);
1846         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
1847         (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
1848 
1849         return (buf);
1850 }
1851 
1852 static char *arc_onloan_tag = "onloan";
1853 
1854 /*
1855  * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1856  * flight data by arc_tempreserve_space() until they are "returned". Loaned
1857  * buffers must be returned to the arc before they can be used by the DMU or
1858  * freed.
1859  */
1860 arc_buf_t *
1861 arc_loan_buf(spa_t *spa, int size)
1862 {
1863         arc_buf_t *buf;
1864 
1865         buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1866 
1867         atomic_add_64(&arc_loaned_bytes, size);
1868         return (buf);
1869 }
1870 
1871 /*
1872  * Return a loaned arc buffer to the arc.
1873  */
1874 void
1875 arc_return_buf(arc_buf_t *buf, void *tag)
1876 {
1877         arc_buf_hdr_t *hdr = buf->b_hdr;
1878 
1879         ASSERT(buf->b_data != NULL);
1880         ASSERT(HDR_HAS_L1HDR(hdr));
1881         (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
1882         (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
1883 
1884         atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1885 }
1886 
1887 /* Detach an arc_buf from a dbuf (tag) */
1888 void
1889 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1890 {
1891         arc_buf_hdr_t *hdr = buf->b_hdr;
1892 
1893         ASSERT(buf->b_data != NULL);
1894         ASSERT(HDR_HAS_L1HDR(hdr));
1895         (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
1896         (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
1897         buf->b_efunc = NULL;
1898         buf->b_private = NULL;
1899 
1900         atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1901 }
1902 
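     /*
      * Create an additional arc_buf_t that shares the header of 'from'
      * but has its own copy of the data.
      */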
1903 static arc_buf_t *
1904 arc_buf_clone(arc_buf_t *from)
1905 {
1906         arc_buf_t *buf;
1907         arc_buf_hdr_t *hdr = from->b_hdr;
1908         uint64_t size = hdr->b_size;
1909 
1910         ASSERT(HDR_HAS_L1HDR(hdr));
1911         ASSERT(hdr->b_l1hdr.b_state != arc_anon);
1912 
1913         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1914         buf->b_hdr = hdr;
1915         buf->b_data = NULL;
1916         buf->b_efunc = NULL;
1917         buf->b_private = NULL;
1918         buf->b_next = hdr->b_l1hdr.b_buf;
1919         hdr->b_l1hdr.b_buf = buf;
1920         arc_get_data_buf(buf);
1921         bcopy(from->b_data, buf->b_data, size);
1922 
1923         /*
1924          * This buffer already exists in the arc so create a duplicate
1925          * copy for the caller.  If the buffer is associated with user data
1926          * then track the size and number of duplicates.  These stats will be
1927          * updated as duplicate buffers are created and destroyed.
1928          */
1929         if (HDR_ISTYPE_DATA(hdr)) {
1930                 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1931                 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1932         }
1933         hdr->b_l1hdr.b_datacnt += 1;
1934         return (buf);
1935 }
1936 
1937 void
1938 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1939 {
1940         arc_buf_hdr_t *hdr;
1941         kmutex_t *hash_lock;
1942 
1943         /*
1944          * Check to see if this buffer is evicted.  Callers
1945          * must verify b_data != NULL to know if the add_ref
1946          * was successful.
1947          */
1948         mutex_enter(&buf->b_evict_lock);
1949         if (buf->b_data == NULL) {
1950                 mutex_exit(&buf->b_evict_lock);
1951                 return;
1952         }
1953         hash_lock = HDR_LOCK(buf->b_hdr);
1954         mutex_enter(hash_lock);
1955         hdr = buf->b_hdr;
1956         ASSERT(HDR_HAS_L1HDR(hdr));
1957         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1958         mutex_exit(&buf->b_evict_lock);
1959 
1960         ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
1961             hdr->b_l1hdr.b_state == arc_mfu);
1962 
1963         add_reference(hdr, hash_lock, tag);
1964         DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1965         arc_access(hdr, hash_lock);
1966         mutex_exit(hash_lock);
1967         ARCSTAT_BUMP(arcstat_hits);
1968         ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
1969             demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
1970             data, metadata, hits);
1971 }
1972 
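     /*
      * Queue a data buffer on the l2arc_free_on_write list so that it
      * can be freed with 'free_func' later, once it is no longer needed
      * by an in-flight L2ARC write.
      */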
1973 static void
1974 arc_buf_free_on_write(void *data, size_t size,
1975     void (*free_func)(void *, size_t))
1976 {
1977         l2arc_data_free_t *df;
1978 
1979         df = kmem_alloc(sizeof (*df), KM_SLEEP);
1980         df->l2df_data = data;
1981         df->l2df_size = size;
1982         df->l2df_func = free_func;
1983         mutex_enter(&l2arc_free_on_write_mtx);
1984         list_insert_head(l2arc_free_on_write, df);
1985         mutex_exit(&l2arc_free_on_write_mtx);
1986 }
1987 
1988 /*
1989  * Free the arc data buffer.  If it is an l2arc write in progress,
1990  * the buffer is placed on l2arc_free_on_write to be freed later.
1991  */
1992 static void
1993 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1994 {
1995         arc_buf_hdr_t *hdr = buf->b_hdr;
1996 
1997         if (HDR_L2_WRITING(hdr)) {
1998                 arc_buf_free_on_write(buf->b_data, hdr->b_size, free_func);
1999                 ARCSTAT_BUMP(arcstat_l2_free_on_write);
2000         } else {
2001                 free_func(buf->b_data, hdr->b_size);
2002         }
2003 }
2004 
2005 static void
2006 arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr)
2007 {
2008         ASSERT(HDR_HAS_L2HDR(hdr));
2009         ASSERT(MUTEX_HELD(&hdr->b_l2hdr.b_dev->l2ad_mtx));
2010 
2011         /*
2012          * The b_tmp_cdata field is linked off of the b_l1hdr, so if
2013          * that doesn't exist, the header is in the arc_l2c_only state,
2014          * and there isn't anything to free (it's already been freed).
2015          */
2016         if (!HDR_HAS_L1HDR(hdr))
2017                 return;
2018 
2019         /*
2020          * The header isn't being written to the l2arc device, thus it
2021          * shouldn't have a b_tmp_cdata to free.
2022          */
2023         if (!HDR_L2_WRITING(hdr)) {
2024                 ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
2025                 return;
2026         }
2027 
2028         /*
2029          * The header does not have compression enabled. This can be due
2030          * to the buffer not being compressible, or because we're
2031          * freeing the buffer before the second phase of
2032          * l2arc_write_buffers() has started (which does the compression
2033          * step). In either case, b_tmp_cdata does not point to a
2034          * separately compressed buffer, so there's nothing to free (it
2035          * points to the same buffer as the arc_buf_t's b_data field).
2036          */
2037         if (hdr->b_l2hdr.b_compress == ZIO_COMPRESS_OFF) {
2038                 hdr->b_l1hdr.b_tmp_cdata = NULL;
2039                 return;
2040         }
2041 
2042         /*
2043          * There's nothing to free since the buffer was all zeros and
2044          * compressed to a zero length buffer.
2045          */
2046         if (hdr->b_l2hdr.b_compress == ZIO_COMPRESS_EMPTY) {
2047                 ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
2048                 return;
2049         }
2050 
2051         ASSERT(L2ARC_IS_VALID_COMPRESS(hdr->b_l2hdr.b_compress));
2052 
2053         arc_buf_free_on_write(hdr->b_l1hdr.b_tmp_cdata,
2054             hdr->b_size, zio_data_buf_free);
2055 
2056         ARCSTAT_BUMP(arcstat_l2_cdata_free_on_write);
2057         hdr->b_l1hdr.b_tmp_cdata = NULL;
2058 }
2059 
2060 /*
2061  * Free up buf->b_data and, if 'remove' is set, pull the
2062  * arc_buf_t off of the arc_buf_hdr_t's list and free it.
2063  */
2064 static void
2065 arc_buf_destroy(arc_buf_t *buf, boolean_t remove)
2066 {
2067         arc_buf_t **bufp;
2068 
2069         /* free up data associated with the buf */
2070         if (buf->b_data != NULL) {
2071                 arc_state_t *state = buf->b_hdr->b_l1hdr.b_state;
2072                 uint64_t size = buf->b_hdr->b_size;
2073                 arc_buf_contents_t type = arc_buf_type(buf->b_hdr);
2074 
2075                 arc_cksum_verify(buf);
2076                 arc_buf_unwatch(buf);
2077 
2078                 if (type == ARC_BUFC_METADATA) {
2079                         arc_buf_data_free(buf, zio_buf_free);
2080                         arc_space_return(size, ARC_SPACE_META);
2081                 } else {
2082                         ASSERT(type == ARC_BUFC_DATA);
2083                         arc_buf_data_free(buf, zio_data_buf_free);
2084                         arc_space_return(size, ARC_SPACE_DATA);
2085                 }
2086 
2087                 /* protected by hash lock, if in the hash table */
2088                 if (multilist_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) {
2089                         uint64_t *cnt = &state->arcs_lsize[type];
2090 
2091                         ASSERT(refcount_is_zero(
2092                             &buf->b_hdr->b_l1hdr.b_refcnt));
2093                         ASSERT(state != arc_anon && state != arc_l2c_only);
2094 
2095                         ASSERT3U(*cnt, >=, size);
2096                         atomic_add_64(cnt, -size);
2097                 }
2098 
2099                 (void) refcount_remove_many(&state->arcs_size, size, buf);
2100                 buf->b_data = NULL;
2101 
2102                 /*
2103                  * If we're destroying a duplicate buffer, make sure
2104                  * that the appropriate statistics are updated.
2105                  */
2106                 if (buf->b_hdr->b_l1hdr.b_datacnt > 1 &&
2107                     HDR_ISTYPE_DATA(buf->b_hdr)) {
2108                         ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
2109                         ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
2110                 }
2111                 ASSERT(buf->b_hdr->b_l1hdr.b_datacnt > 0);
2112                 buf->b_hdr->b_l1hdr.b_datacnt -= 1;
2113         }
2114 
2115         /* only remove the buf if requested */
2116         if (!remove)
2117                 return;
2118 
2119         /* remove the buf from the hdr list */
2120         for (bufp = &buf->b_hdr->b_l1hdr.b_buf; *bufp != buf;
2121             bufp = &(*bufp)->b_next)
2122                 continue;
2123         *bufp = buf->b_next;
2124         buf->b_next = NULL;
2125 
2126         ASSERT(buf->b_efunc == NULL);
2127 
2128         /* clean up the buf */
2129         buf->b_hdr = NULL;
2130         kmem_cache_free(buf_cache, buf);
2131 }
2132 
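     /*
      * Tear down the L2ARC portion of 'hdr': unlink it from the device's
      * buflist, free any temporary compressed copy of the data, and,
      * unless the header is still in the first stage of
      * l2arc_write_buffers() (b_daddr == L2ARC_ADDR_UNSET), roll back
      * the L2ARC size and space accounting.  The caller must hold
      * l2ad_mtx.
      */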
2133 static void
2134 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
2135 {
2136         l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
2137         l2arc_dev_t *dev = l2hdr->b_dev;
2138 
2139         ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
2140         ASSERT(HDR_HAS_L2HDR(hdr));
2141 
2142         list_remove(&dev->l2ad_buflist, hdr);
2143 
2144         /*
2145          * We don't want to leak the b_tmp_cdata buffer that was
2146          * allocated in l2arc_write_buffers()
2147          */
2148         arc_buf_l2_cdata_free(hdr);
2149 
2150         /*
2151          * If the l2hdr's b_daddr is equal to L2ARC_ADDR_UNSET, then
2152          * this header is being processed by l2arc_write_buffers() (i.e.
2153          * it's in the first stage of l2arc_write_buffers()).
2154          * Re-affirming that truth here, just to serve as a reminder. If
2155          * b_daddr does not equal L2ARC_ADDR_UNSET, then the header may or
2156          * may not have its HDR_L2_WRITING flag set. (the write may have
2157          * completed, in which case HDR_L2_WRITING will be false and the
2158          * b_daddr field will point to the address of the buffer on disk).
2159          */
2160         IMPLY(l2hdr->b_daddr == L2ARC_ADDR_UNSET, HDR_L2_WRITING(hdr));
2161 
2162         /*
2163          * If b_daddr is equal to L2ARC_ADDR_UNSET, we're racing with
2164          * l2arc_write_buffers(). Since we've just removed this header
2165          * from the l2arc buffer list, this header will never reach the
2166          * second stage of l2arc_write_buffers(), which increments the
2167          * accounting stats for this header. Thus, we must be careful
2168          * not to decrement them for this header either.
2169          */
2170         if (l2hdr->b_daddr != L2ARC_ADDR_UNSET) {
2171                 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
2172                 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
2173 
2174                 vdev_space_update(dev->l2ad_vdev,
2175                     -l2hdr->b_asize, 0, 0);
2176 
2177                 (void) refcount_remove_many(&dev->l2ad_alloc,
2178                     l2hdr->b_asize, hdr);
2179         }
2180 
2181         hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
2182 }
2183 
2184 static void
2185 arc_hdr_destroy(arc_buf_hdr_t *hdr)
2186 {
2187         if (HDR_HAS_L1HDR(hdr)) {
2188                 ASSERT(hdr->b_l1hdr.b_buf == NULL ||
2189                     hdr->b_l1hdr.b_datacnt > 0);
2190                 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2191                 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
2192         }
2193         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2194         ASSERT(!HDR_IN_HASH_TABLE(hdr));
2195 
2196         if (HDR_HAS_L2HDR(hdr)) {
2197                 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
2198                 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
2199 
2200                 if (!buflist_held)
2201                         mutex_enter(&dev->l2ad_mtx);
2202 
2203                 /*
2204                  * Even though we checked this conditional above, we
2205                  * need to check this again now that we have the
2206                  * l2ad_mtx. This is because we could be racing with
2207                  * another thread calling l2arc_evict() which might have
2208                  * destroyed this header's L2 portion as we were waiting
2209                  * to acquire the l2ad_mtx. If that happens, we don't
2210                  * want to re-destroy the header's L2 portion.
2211                  */
2212                 if (HDR_HAS_L2HDR(hdr))
2213                         arc_hdr_l2hdr_destroy(hdr);
2214 
2215                 if (!buflist_held)
2216                         mutex_exit(&dev->l2ad_mtx);
2217         }
2218 
2219         if (!BUF_EMPTY(hdr))
2220                 buf_discard_identity(hdr);
2221 
2222         if (hdr->b_freeze_cksum != NULL) {
2223                 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
2224                 hdr->b_freeze_cksum = NULL;
2225         }
2226 
2227         if (HDR_HAS_L1HDR(hdr)) {
2228                 while (hdr->b_l1hdr.b_buf) {
2229                         arc_buf_t *buf = hdr->b_l1hdr.b_buf;
2230 
2231                         if (buf->b_efunc != NULL) {
2232                                 mutex_enter(&arc_user_evicts_lock);
2233                                 mutex_enter(&buf->b_evict_lock);
2234                                 ASSERT(buf->b_hdr != NULL);
2235                                 arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE);
2236                                 hdr->b_l1hdr.b_buf = buf->b_next;
2237                                 buf->b_hdr = &arc_eviction_hdr;
2238                                 buf->b_next = arc_eviction_list;
2239                                 arc_eviction_list = buf;
2240                                 mutex_exit(&buf->b_evict_lock);
2241                                 cv_signal(&arc_user_evicts_cv);
2242                                 mutex_exit(&arc_user_evicts_lock);
2243                         } else {
2244                                 arc_buf_destroy(hdr->b_l1hdr.b_buf, TRUE);
2245                         }
2246                 }
2247 #ifdef ZFS_DEBUG
2248                 if (hdr->b_l1hdr.b_thawed != NULL) {
2249                         kmem_free(hdr->b_l1hdr.b_thawed, 1);
2250                         hdr->b_l1hdr.b_thawed = NULL;
2251                 }
2252 #endif
2253         }
2254 
2255         ASSERT3P(hdr->b_hash_next, ==, NULL);
2256         if (HDR_HAS_L1HDR(hdr)) {
2257                 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
2258                 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
2259                 kmem_cache_free(hdr_full_cache, hdr);
2260         } else {
2261                 kmem_cache_free(hdr_l2only_cache, hdr);
2262         }
2263 }
2264 
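     /*
      * Free an ARC buffer held by 'tag'.  The buffer must not have an
      * eviction callback (b_efunc) registered.
      */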
2265 void
2266 arc_buf_free(arc_buf_t *buf, void *tag)
2267 {
2268         arc_buf_hdr_t *hdr = buf->b_hdr;
2269         int hashed = hdr->b_l1hdr.b_state != arc_anon;
2270 
2271         ASSERT(buf->b_efunc == NULL);
2272         ASSERT(buf->b_data != NULL);
2273 
2274         if (hashed) {
2275                 kmutex_t *hash_lock = HDR_LOCK(hdr);
2276 
2277                 mutex_enter(hash_lock);
2278                 hdr = buf->b_hdr;
2279                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2280 
2281                 (void) remove_reference(hdr, hash_lock, tag);
2282                 if (hdr->b_l1hdr.b_datacnt > 1) {
2283                         arc_buf_destroy(buf, TRUE);
2284                 } else {
2285                         ASSERT(buf == hdr->b_l1hdr.b_buf);
2286                         ASSERT(buf->b_efunc == NULL);
2287                         hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
2288                 }
2289                 mutex_exit(hash_lock);
2290         } else if (HDR_IO_IN_PROGRESS(hdr)) {
2291                 int destroy_hdr;
2292                 /*
2293                  * We are in the middle of an async write.  Don't destroy
2294                  * this buffer unless the write completes before we finish
2295                  * decrementing the reference count.
2296                  */
2297                 mutex_enter(&arc_user_evicts_lock);
2298                 (void) remove_reference(hdr, NULL, tag);
2299                 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2300                 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
2301                 mutex_exit(&arc_user_evicts_lock);
2302                 if (destroy_hdr)
2303                         arc_hdr_destroy(hdr);
2304         } else {
2305                 if (remove_reference(hdr, NULL, tag) > 0)
2306                         arc_buf_destroy(buf, TRUE);
2307                 else
2308                         arc_hdr_destroy(hdr);
2309         }
2310 }
2311 
2312 boolean_t
2313 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
2314 {
2315         arc_buf_hdr_t *hdr = buf->b_hdr;
2316         kmutex_t *hash_lock = HDR_LOCK(hdr);
2317         boolean_t no_callback = (buf->b_efunc == NULL);
2318 
2319         if (hdr->b_l1hdr.b_state == arc_anon) {
2320                 ASSERT(hdr->b_l1hdr.b_datacnt == 1);
2321                 arc_buf_free(buf, tag);
2322                 return (no_callback);
2323         }
2324 
2325         mutex_enter(hash_lock);
2326         hdr = buf->b_hdr;
2327         ASSERT(hdr->b_l1hdr.b_datacnt > 0);
2328         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2329         ASSERT(hdr->b_l1hdr.b_state != arc_anon);
2330         ASSERT(buf->b_data != NULL);
2331 
2332         (void) remove_reference(hdr, hash_lock, tag);
2333         if (hdr->b_l1hdr.b_datacnt > 1) {
2334                 if (no_callback)
2335                         arc_buf_destroy(buf, TRUE);
2336         } else if (no_callback) {
2337                 ASSERT(hdr->b_l1hdr.b_buf == buf && buf->b_next == NULL);
2338                 ASSERT(buf->b_efunc == NULL);
2339                 hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
2340         }
2341         ASSERT(no_callback || hdr->b_l1hdr.b_datacnt > 1 ||
2342             refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2343         mutex_exit(hash_lock);
2344         return (no_callback);
2345 }
2346 
2347 int32_t
2348 arc_buf_size(arc_buf_t *buf)
2349 {
2350         return (buf->b_hdr->b_size);
2351 }
2352 
2353 /*
2354  * Called from the DMU to determine if the current buffer should be
2355  * evicted. In order to ensure proper locking, the eviction must be initiated
2356  * from the DMU. Return true if the buffer is associated with user data and
2357  * duplicate buffers still exist.
2358  */
2359 boolean_t
2360 arc_buf_eviction_needed(arc_buf_t *buf)
2361 {
2362         arc_buf_hdr_t *hdr;
2363         boolean_t evict_needed = B_FALSE;
2364 
2365         if (zfs_disable_dup_eviction)
2366                 return (B_FALSE);
2367 
2368         mutex_enter(&buf->b_evict_lock);
2369         hdr = buf->b_hdr;
2370         if (hdr == NULL) {
2371                 /*
2372                  * We are in arc_do_user_evicts(); let that function
2373                  * perform the eviction.
2374                  */
2375                 ASSERT(buf->b_data == NULL);
2376                 mutex_exit(&buf->b_evict_lock);
2377                 return (B_FALSE);
2378         } else if (buf->b_data == NULL) {
2379                 /*
2380                  * We have already been added to the arc eviction list;
2381                  * recommend eviction.
2382                  */
2383                 ASSERT3P(hdr, ==, &arc_eviction_hdr);
2384                 mutex_exit(&buf->b_evict_lock);
2385                 return (B_TRUE);
2386         }
2387 
2388         if (hdr->b_l1hdr.b_datacnt > 1 && HDR_ISTYPE_DATA(hdr))
2389                 evict_needed = B_TRUE;
2390 
2391         mutex_exit(&buf->b_evict_lock);
2392         return (evict_needed);
2393 }
2394 
2395 /*
2396  * Evict the arc_buf_hdr that is provided as a parameter. The resultant
2397  * state of the header is dependent on its state prior to entering this
2398  * function. The following transitions are possible:
2399  *
2400  *    - arc_mru -> arc_mru_ghost
2401  *    - arc_mfu -> arc_mfu_ghost
2402  *    - arc_mru_ghost -> arc_l2c_only
2403  *    - arc_mru_ghost -> deleted
2404  *    - arc_mfu_ghost -> arc_l2c_only
2405  *    - arc_mfu_ghost -> deleted
2406  */
2407 static int64_t
2408 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
2409 {
2410         arc_state_t *evicted_state, *state;
2411         int64_t bytes_evicted = 0;
2412 
2413         ASSERT(MUTEX_HELD(hash_lock));
2414         ASSERT(HDR_HAS_L1HDR(hdr));
2415 
2416         state = hdr->b_l1hdr.b_state;
2417         if (GHOST_STATE(state)) {
2418                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2419                 ASSERT(hdr->b_l1hdr.b_buf == NULL);
2420 
2421                 /*
2422                  * l2arc_write_buffers() relies on a header's L1 portion
2423                  * (i.e. its b_tmp_cdata field) during its write phase.
2424                  * Thus, we cannot push a header onto the arc_l2c_only
2425                  * state (removing its L1 piece) until the header is
2426                  * done being written to the l2arc.
2427                  */
2428                 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
2429                         ARCSTAT_BUMP(arcstat_evict_l2_skip);
2430                         return (bytes_evicted);
2431                 }
2432 
2433                 ARCSTAT_BUMP(arcstat_deleted);
2434                 bytes_evicted += hdr->b_size;
2435 
2436                 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
2437 
2438                 if (HDR_HAS_L2HDR(hdr)) {
2439                         /*
2440                          * This buffer is cached on the 2nd Level ARC;
2441                          * don't destroy the header.
2442                          */
2443                         arc_change_state(arc_l2c_only, hdr, hash_lock);
2444                         /*
2445                          * dropping from L1+L2 cached to L2-only,
2446                          * realloc to remove the L1 header.
2447                          */
2448                         hdr = arc_hdr_realloc(hdr, hdr_full_cache,
2449                             hdr_l2only_cache);
2450                 } else {
2451                         arc_change_state(arc_anon, hdr, hash_lock);
2452                         arc_hdr_destroy(hdr);
2453                 }
2454                 return (bytes_evicted);
2455         }
2456 
2457         ASSERT(state == arc_mru || state == arc_mfu);
2458         evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2459 
2460         /* prefetch buffers have a minimum lifespan */
2461         if (HDR_IO_IN_PROGRESS(hdr) ||
2462             ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
2463             ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
2464             arc_min_prefetch_lifespan)) {
2465                 ARCSTAT_BUMP(arcstat_evict_skip);
2466                 return (bytes_evicted);
2467         }
2468 
2469         ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
2470         ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0);
2471         while (hdr->b_l1hdr.b_buf) {
2472                 arc_buf_t *buf = hdr->b_l1hdr.b_buf;
2473                 if (!mutex_tryenter(&buf->b_evict_lock)) {
2474                         ARCSTAT_BUMP(arcstat_mutex_miss);
2475                         break;
2476                 }
2477                 if (buf->b_data != NULL)
2478                         bytes_evicted += hdr->b_size;
2479                 if (buf->b_efunc != NULL) {
2480                         mutex_enter(&arc_user_evicts_lock);
2481                         arc_buf_destroy(buf, FALSE);
2482                         hdr->b_l1hdr.b_buf = buf->b_next;
2483                         buf->b_hdr = &arc_eviction_hdr;
2484                         buf->b_next = arc_eviction_list;
2485                         arc_eviction_list = buf;
2486                         cv_signal(&arc_user_evicts_cv);
2487                         mutex_exit(&arc_user_evicts_lock);
2488                         mutex_exit(&buf->b_evict_lock);
2489                 } else {
2490                         mutex_exit(&buf->b_evict_lock);
2491                         arc_buf_destroy(buf, TRUE);
2492                 }
2493         }
2494 
2495         if (HDR_HAS_L2HDR(hdr)) {
2496                 ARCSTAT_INCR(arcstat_evict_l2_cached, hdr->b_size);
2497         } else {
2498                 if (l2arc_write_eligible(hdr->b_spa, hdr))
2499                         ARCSTAT_INCR(arcstat_evict_l2_eligible, hdr->b_size);
2500                 else
2501                         ARCSTAT_INCR(arcstat_evict_l2_ineligible, hdr->b_size);
2502         }
2503 
2504         if (hdr->b_l1hdr.b_datacnt == 0) {
2505                 arc_change_state(evicted_state, hdr, hash_lock);
2506                 ASSERT(HDR_IN_HASH_TABLE(hdr));
2507                 hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
2508                 hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
2509                 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
2510         }
2511 
2512         return (bytes_evicted);
2513 }
2514 
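     /*
      * Evict eligible headers from a single sublist of 'ml', walking
      * backwards from 'marker', until either 'bytes' worth of data has
      * been evicted (unless 'bytes' is ARC_EVICT_ALL) or
      * zfs_arc_evict_batch_limit headers have been evicted.  Headers
      * belonging to other spas (when 'spa' is non-zero) and other
      * threads' markers are skipped.  Returns the number of bytes
      * evicted.
      */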
2515 static uint64_t
2516 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
2517     uint64_t spa, int64_t bytes)
2518 {
2519         multilist_sublist_t *mls;
2520         uint64_t bytes_evicted = 0;
2521         arc_buf_hdr_t *hdr;
2522         kmutex_t *hash_lock;
2523         int evict_count = 0;
2524 
2525         ASSERT3P(marker, !=, NULL);
2526         IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
2527 
2528         mls = multilist_sublist_lock(ml, idx);
2529 
2530         for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
2531             hdr = multilist_sublist_prev(mls, marker)) {
2532                 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
2533                     (evict_count >= zfs_arc_evict_batch_limit))
2534                         break;
2535 
2536                 /*
2537                  * To keep our iteration location, move the marker
2538                  * forward. Since we're not holding hdr's hash lock, we
2539                  * must be very careful and not remove 'hdr' from the
2540                  * sublist. Otherwise, other consumers might mistake the
2541                  * 'hdr' as not being on a sublist when they call the
2542                  * multilist_link_active() function (they all rely on
2543                  * the hash lock protecting concurrent insertions and
2544                  * removals). multilist_sublist_move_forward() was
2545                  * specifically implemented to ensure this is the case
2546                  * (only 'marker' will be removed and re-inserted).
2547                  */
2548                 multilist_sublist_move_forward(mls, marker);
2549 
2550                 /*
2551                  * The only case where the b_spa field should ever be
2552                  * zero, is the marker headers inserted by
2553                  * arc_evict_state(). It's possible for multiple threads
2554                  * to be calling arc_evict_state() concurrently (e.g.
2555                  * dsl_pool_close() and zio_inject_fault()), so we must
2556                  * skip any markers we see from these other threads.
2557                  */
2558                 if (hdr->b_spa == 0)
2559                         continue;
2560 
2561                 /* we're only interested in evicting buffers of a certain spa */
2562                 if (spa != 0 && hdr->b_spa != spa) {
2563                         ARCSTAT_BUMP(arcstat_evict_skip);
2564                         continue;
2565                 }
2566 
2567                 hash_lock = HDR_LOCK(hdr);
2568 
2569                 /*
2570                  * We aren't calling this function from any code path
2571                  * that would already be holding a hash lock, so we're
2572                  * asserting on this assumption to be defensive in case
2573                  * this ever changes. Without this check, it would be
2574                  * possible to incorrectly increment arcstat_mutex_miss
2575                  * below (e.g. if the code changed such that we called
2576                  * this function with a hash lock held).
2577                  */
2578                 ASSERT(!MUTEX_HELD(hash_lock));
2579 
2580                 if (mutex_tryenter(hash_lock)) {
2581                         uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
2582                         mutex_exit(hash_lock);
2583 
2584                         bytes_evicted += evicted;
2585 
2586                         /*
2587                          * If evicted is zero, arc_evict_hdr() must have
2588                          * decided to skip this header; don't increment
2589                          * evict_count in that case.
2590                          */
2591                         if (evicted != 0)
2592                                 evict_count++;
2593 
2594                         /*
2595                          * If arc_size isn't overflowing, signal any
2596                          * threads that might happen to be waiting.
2597                          *
2598                          * For each header evicted, we wake up a single
2599                          * thread. If we used cv_broadcast, we could
2600                          * wake up "too many" threads causing arc_size
2601                          * to significantly overflow arc_c, since
2602                          * arc_get_data_buf() doesn't check for overflow
2603                          * when it's woken up (it doesn't, because it's
2604                          * possible for the ARC to be overflowing while
2605                          * full of un-evictable buffers, and the
2606                          * function should proceed in this case).
2607                          *
2608                          * If threads are left sleeping, due to not
2609                          * using cv_broadcast, they will be woken up
2610                          * just before arc_reclaim_thread() sleeps.
2611                          */
2612                         mutex_enter(&arc_reclaim_lock);
2613                         if (!arc_is_overflowing())
2614                                 cv_signal(&arc_reclaim_waiters_cv);
2615                         mutex_exit(&arc_reclaim_lock);
2616                 } else {
2617                         ARCSTAT_BUMP(arcstat_mutex_miss);
2618                 }
2619         }
2620 
2621         multilist_sublist_unlock(mls);
2622 
2623         return (bytes_evicted);
2624 }
2625 
2626 /*
2627  * Evict buffers from the given arc state, until we've removed the
2628  * specified number of bytes. Move the removed buffers to the
2629  * appropriate evict state.
2630  *
2631  * This function makes a "best effort". It skips over any buffers
2632  * it can't get a hash_lock on, and so, may not catch all candidates.
2633  * It may also return without evicting as much space as requested.
2634  *
2635  * If bytes is specified using the special value ARC_EVICT_ALL, this
2636  * will evict all available (i.e. unlocked and evictable) buffers from
2637  * the given arc state; which is used by arc_flush().
2638  */
2639 static uint64_t
2640 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
2641     arc_buf_contents_t type)
2642 {
2643         uint64_t total_evicted = 0;
2644         multilist_t *ml = &state->arcs_list[type];
2645         int num_sublists;
2646         arc_buf_hdr_t **markers;
2647 
2648         IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
2649 
2650         num_sublists = multilist_get_num_sublists(ml);
2651 
2652         /*
2653          * If we've tried to evict from each sublist, made some
2654          * progress, but still have not hit the target number of bytes
2655          * to evict, we want to keep trying. The markers allow us to
2656          * pick up where we left off for each individual sublist, rather
2657          * than starting from the tail each time.
2658          */
2659         markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
2660         for (int i = 0; i < num_sublists; i++) {
2661                 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
2662 
2663                 /*
2664                  * A b_spa of 0 is used to indicate that this header is
2665                  * a marker. This fact is used in arc_adjust_type() and
2666                  * arc_evict_state_impl().
2667                  */
2668                 markers[i]->b_spa = 0;
2669 
2670                 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
2671                 multilist_sublist_insert_tail(mls, markers[i]);
2672                 multilist_sublist_unlock(mls);
2673         }
2674 
2675         /*
2676          * While we haven't hit our target number of bytes to evict, or
2677          * we're evicting all available buffers.
2678          */
2679         while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
2680                 /*
2681                  * Start eviction using a randomly selected sublist;
2682                  * this is to try to evenly balance eviction across all
2683                  * sublists. Always starting at the same sublist
2684                  * (e.g. index 0) would cause evictions to favor certain
2685                  * sublists over others.
2686                  */
2687                 int sublist_idx = multilist_get_random_index(ml);
2688                 uint64_t scan_evicted = 0;
2689 
2690                 for (int i = 0; i < num_sublists; i++) {
2691                         uint64_t bytes_remaining;
2692                         uint64_t bytes_evicted;
2693 
2694                         if (bytes == ARC_EVICT_ALL)
2695                                 bytes_remaining = ARC_EVICT_ALL;
2696                         else if (total_evicted < bytes)
2697                                 bytes_remaining = bytes - total_evicted;
2698                         else
2699                                 break;
2700 
2701                         bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
2702                             markers[sublist_idx], spa, bytes_remaining);
2703 
2704                         scan_evicted += bytes_evicted;
2705                         total_evicted += bytes_evicted;
2706 
2707                         /* we've reached the end, wrap to the beginning */
2708                         if (++sublist_idx >= num_sublists)
2709                                 sublist_idx = 0;
2710                 }
2711 
2712                 /*
2713                  * If we didn't evict anything during this scan, we have
2714                  * no reason to believe we'll evict more during another
2715                  * scan, so break the loop.
2716                  */
2717                 if (scan_evicted == 0) {
2718                         /* This isn't possible; let's make that obvious */
2719                         ASSERT3S(bytes, !=, 0);
2720 
2721                         /*
2722                          * When bytes is ARC_EVICT_ALL, the only way to
2723                          * break the loop is when scan_evicted is zero.
2724                          * In that case, we actually have evicted enough,
2725                          * so we don't want to increment the kstat.
2726                          */
2727                         if (bytes != ARC_EVICT_ALL) {
2728                                 ASSERT3S(total_evicted, <, bytes);
2729                                 ARCSTAT_BUMP(arcstat_evict_not_enough);
2730                         }
2731 
2732                         break;
2733                 }
2734         }
2735 
2736         for (int i = 0; i < num_sublists; i++) {
2737                 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
2738                 multilist_sublist_remove(mls, markers[i]);
2739                 multilist_sublist_unlock(mls);
2740 
2741                 kmem_cache_free(hdr_full_cache, markers[i]);
2742         }
2743         kmem_free(markers, sizeof (*markers) * num_sublists);
2744 
2745         return (total_evicted);
2746 }
2747 
2748 /*
2749  * Flush all "evictable" data of the given type from the arc state
2750  * specified. This will not evict any "active" buffers (i.e. referenced).
2751  *
2752  * When 'retry' is set to FALSE, the function will make a single pass
2753  * over the state and evict any buffers that it can. Since it doesn't
2754  * continually retry the eviction, it might end up leaving some buffers
2755  * in the ARC due to lock misses.
2756  *
2757  * When 'retry' is set to TRUE, the function will continually retry the
2758  * eviction until *all* evictable buffers have been removed from the
2759  * state. As a result, if concurrent insertions into the state are
2760  * allowed (e.g. if the ARC isn't shutting down), this function might
2761  * wind up in an infinite loop, continually trying to evict buffers.
2762  */
2763 static uint64_t
2764 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
2765     boolean_t retry)
2766 {
2767         uint64_t evicted = 0;
2768 
2769         while (state->arcs_lsize[type] != 0) {
2770                 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
2771 
2772                 if (!retry)
2773                         break;
2774         }
2775 
2776         return (evicted);
2777 }
2778 
2779 /*
2780  * Evict the specified number of bytes from the state specified,
2781  * restricting eviction to the spa and type given. This function
2782  * prevents us from trying to evict more from a state's list than
2783  * is "evictable", and skips eviction altogether when passed a
2784  * negative value for "bytes". In contrast, arc_evict_state() will
2785  * evict everything it can when passed a negative value for "bytes".
2786  */
2787 static uint64_t
2788 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
2789     arc_buf_contents_t type)
2790 {
2791         int64_t delta;
2792 
2793         if (bytes > 0 && state->arcs_lsize[type] > 0) {
2794                 delta = MIN(state->arcs_lsize[type], bytes);
2795                 return (arc_evict_state(state, spa, delta, type));
2796         }
2797 
2798         return (0);
2799 }
2800 
2801 /*
2802  * Evict metadata buffers from the cache, such that arc_meta_used is
2803  * capped by the arc_meta_limit tunable.
2804  */
2805 static uint64_t
2806 arc_adjust_meta(void)
2807 {
2808         uint64_t total_evicted = 0;
2809         int64_t target;
2810 
2811         /*
2812          * If we're over the meta limit, we want to evict enough
2813          * metadata to get back under the meta limit. We don't want to
2814          * evict so much that we drop the MRU below arc_p, though. If
2815          * we're over the meta limit more than we're over arc_p, we
2816          * evict some from the MRU here, and some from the MFU below.
2817          */
2818         target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
2819             (int64_t)(refcount_count(&arc_anon->arcs_size) +
2820             refcount_count(&arc_mru->arcs_size) - arc_p));
2821 
2822         total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
2823 
2824         /*
2825          * Similar to the above, we want to evict enough bytes to get us
2826          * below the meta limit, but not so much as to drop us below the
2827          * space allotted to the MFU (which is defined as arc_c - arc_p).
2828          */
2829         target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
2830             (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));
2831 
2832         total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
2833 
2834         return (total_evicted);
2835 }
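
/*
 * The two eviction targets computed in arc_adjust_meta() reduce to simple
 * arithmetic, so a small standalone sketch may help make the intent
 * concrete.  The function below is a hypothetical, simplified model (not
 * the kernel code): it takes the relevant sizes as plain integers and
 * returns how much metadata to evict from the MRU, i.e. the smaller of
 * "how far we are over the meta limit" and "how far anon + MRU are over
 * arc_p".
 *
 *	#include <stdint.h>
 *
 *	static int64_t
 *	mru_meta_evict_target(int64_t meta_used, int64_t meta_limit,
 *	    int64_t anon_size, int64_t mru_size, int64_t p)
 *	{
 *		int64_t over_limit = meta_used - meta_limit;
 *		int64_t over_p = anon_size + mru_size - p;
 *
 *		return (over_limit < over_p ? over_limit : over_p);
 *	}
 *
 * For example, with meta_used = 6 GB, meta_limit = 4 GB, anon + MRU = 5 GB
 * and arc_p = 4 GB, the function returns 1 GB: even though we are 2 GB
 * over the meta limit, evicting more than 1 GB here would drop the MRU
 * below arc_p, so the remainder is taken from the MFU instead.
 */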
2836 
2837 /*
2838  * Return the type of the oldest buffer in the given arc state
2839  *
2840  * This function will select a random sublist of type ARC_BUFC_DATA and
2841  * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
2842  * is compared, and the type which contains the "older" buffer will be
2843  * returned.
2844  */
2845 static arc_buf_contents_t
2846 arc_adjust_type(arc_state_t *state)
2847 {
2848         multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
2849         multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
2850         int data_idx = multilist_get_random_index(data_ml);
2851         int meta_idx = multilist_get_random_index(meta_ml);
2852         multilist_sublist_t *data_mls;
2853         multilist_sublist_t *meta_mls;
2854         arc_buf_contents_t type;
2855         arc_buf_hdr_t *data_hdr;
2856         arc_buf_hdr_t *meta_hdr;
2857 
2858         /*
2859          * We keep the sublist lock until we're finished, to prevent
2860          * the headers from being destroyed via arc_evict_state().
2861          */
2862         data_mls = multilist_sublist_lock(data_ml, data_idx);
2863         meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
2864 
2865         /*
2866          * These two loops are to ensure we skip any markers that
2867          * might be at the tail of the lists due to arc_evict_state().
2868          */
2869 
2870         for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
2871             data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
2872                 if (data_hdr->b_spa != 0)
2873                         break;
2874         }
2875 
2876         for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
2877             meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
2878                 if (meta_hdr->b_spa != 0)
2879                         break;
2880         }
2881 
2882         if (data_hdr == NULL && meta_hdr == NULL) {
2883                 type = ARC_BUFC_DATA;
2884         } else if (data_hdr == NULL) {
2885                 ASSERT3P(meta_hdr, !=, NULL);
2886                 type = ARC_BUFC_METADATA;
2887         } else if (meta_hdr == NULL) {
2888                 ASSERT3P(data_hdr, !=, NULL);
2889                 type = ARC_BUFC_DATA;
2890         } else {
2891                 ASSERT3P(data_hdr, !=, NULL);
2892                 ASSERT3P(meta_hdr, !=, NULL);
2893 
2894                 /* The headers can't be on the sublist without an L1 header */
2895                 ASSERT(HDR_HAS_L1HDR(data_hdr));
2896                 ASSERT(HDR_HAS_L1HDR(meta_hdr));
2897 
2898                 if (data_hdr->b_l1hdr.b_arc_access <
2899                     meta_hdr->b_l1hdr.b_arc_access) {
2900                         type = ARC_BUFC_DATA;
2901                 } else {
2902                         type = ARC_BUFC_METADATA;
2903                 }
2904         }
2905 
2906         multilist_sublist_unlock(meta_mls);
2907         multilist_sublist_unlock(data_mls);
2908 
2909         return (type);
2910 }
2911 
2912 /*
2913  * Evict buffers from the cache, such that arc_size is capped by arc_c.
2914  */
2915 static uint64_t
2916 arc_adjust(void)
2917 {
2918         uint64_t total_evicted = 0;
2919         uint64_t bytes;
2920         int64_t target;
2921 
2922         /*
2923          * If we're over arc_meta_limit, we want to correct that before
2924          * potentially evicting data buffers below.
2925          */
2926         total_evicted += arc_adjust_meta();
2927 
2928         /*
2929          * Adjust MRU size
2930          *
2931          * If we're over the target cache size, we want to evict enough
2932          * from the list to get back to our target size. We don't want
2933          * to evict too much from the MRU, such that it drops below
2934          * arc_p. So, if we're over our target cache size more than
2935          * the MRU is over arc_p, we'll evict enough to get back to
2936          * arc_p here, and then evict more from the MFU below.
2937          */
2938         target = MIN((int64_t)(arc_size - arc_c),
2939             (int64_t)(refcount_count(&arc_anon->arcs_size) +
2940             refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
2941 
2942         /*
2943          * If we're below arc_meta_min, always prefer to evict data.
2944          * Otherwise, try to satisfy the requested number of bytes to
2945          * evict from the type which contains older buffers, in an
2946          * effort to keep newer buffers in the cache regardless of their
2947          * type. If we cannot satisfy the number of bytes from this
2948          * type, spill over into the next type.
2949          */
2950         if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
2951             arc_meta_used > arc_meta_min) {
2952                 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
2953                 total_evicted += bytes;
2954 
2955                 /*
2956                  * If we couldn't evict our target number of bytes from
2957                  * metadata, we try to get the rest from data.
2958                  */
2959                 target -= bytes;
2960 
2961                 total_evicted +=
2962                     arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
2963         } else {
2964                 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
2965                 total_evicted += bytes;
2966 
2967                 /*
2968                  * If we couldn't evict our target number of bytes from
2969                  * data, we try to get the rest from metadata.
2970                  */
2971                 target -= bytes;
2972 
2973                 total_evicted +=
2974                     arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
2975         }
2976 
2977         /*
2978          * Adjust MFU size
2979          *
2980          * Now that we've tried to evict enough from the MRU to get its
2981          * size back to arc_p, if we're still above the target cache
2982          * size, we evict the rest from the MFU.
2983          */
2984         target = arc_size - arc_c;
2985 
2986         if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
2987             arc_meta_used > arc_meta_min) {
2988                 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
2989                 total_evicted += bytes;
2990 
2991                 /*
2992                  * If we couldn't evict our target number of bytes from
2993                  * metadata, we try to get the rest from data.
2994                  */
2995                 target -= bytes;
2996 
2997                 total_evicted +=
2998                     arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
2999         } else {
3000                 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
3001                 total_evicted += bytes;
3002 
3003                 /*
3004                  * If we couldn't evict our target number of bytes from
3005                  * data, we try to get the rest from metadata.
3006                  */
3007                 target -= bytes;
3008 
3009                 total_evicted +=
3010                     arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
3011         }
3012 
3013         /*
3014          * Adjust ghost lists
3015          *
3016          * In addition to the above, the ARC also defines target values
3017          * for the ghost lists. The sum of the mru list and mru ghost
3018          * list should never exceed the target size of the cache, and
3019          * the sum of the mru list, mfu list, mru ghost list, and mfu
3020          * ghost list should never exceed twice the target size of the
3021          * cache. The following logic enforces these limits on the ghost
3022          * caches, and evicts from them as needed.
3023          */
3024         target = refcount_count(&arc_mru->arcs_size) +
3025             refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
3026 
3027         bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
3028         total_evicted += bytes;
3029 
3030         target -= bytes;
3031 
3032         total_evicted +=
3033             arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
3034 
3035         /*
3036          * We assume the sum of the mru list and mfu list is less than
3037          * or equal to arc_c (we enforced this above), which means we
3038          * can use the simpler of the two equations below:
3039          *
3040          *      mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
3041          *                  mru ghost + mfu ghost <= arc_c
3042          */
3043         target = refcount_count(&arc_mru_ghost->arcs_size) +
3044             refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
3045 
3046         bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
3047         total_evicted += bytes;
3048 
3049         target -= bytes;
3050 
3051         total_evicted +=
3052             arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
3053 
3054         return (total_evicted);
3055 }
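
/*
 * A minimal sketch of the ghost-list target arithmetic used above.  The
 * function below is hypothetical (plain integers in place of the
 * refcount_count() calls on the arc states) and models only the two
 * targets, ignoring that arc_adjust() recomputes the second one after the
 * first round of evictions:
 *
 *	#include <stdint.h>
 *
 *	typedef struct ghost_targets {
 *		int64_t	mru_ghost_evict;
 *		int64_t	mfu_ghost_evict;
 *	} ghost_targets_t;
 *
 *	static ghost_targets_t
 *	ghost_evict_targets(int64_t mru, int64_t mru_ghost,
 *	    int64_t mfu_ghost, int64_t c)
 *	{
 *		ghost_targets_t t;
 *
 *		// Keep mru + mru ghost within the target cache size.
 *		t.mru_ghost_evict = mru + mru_ghost - c;
 *		// Keep mru ghost + mfu ghost within the target cache size;
 *		// combined with the resident lists this bounds the total
 *		// footprint at 2 * c.
 *		t.mfu_ghost_evict = mru_ghost + mfu_ghost - c;
 *		return (t);
 *	}
 *
 * A non-positive target means the corresponding ghost list is already
 * within bounds, and arc_adjust_impl() will evict nothing from it.
 */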
3056 
3057 static void
3058 arc_do_user_evicts(void)
3059 {
3060         mutex_enter(&arc_user_evicts_lock);
3061         while (arc_eviction_list != NULL) {
3062                 arc_buf_t *buf = arc_eviction_list;
3063                 arc_eviction_list = buf->b_next;
3064                 mutex_enter(&buf->b_evict_lock);
3065                 buf->b_hdr = NULL;
3066                 mutex_exit(&buf->b_evict_lock);
3067                 mutex_exit(&arc_user_evicts_lock);
3068 
3069                 if (buf->b_efunc != NULL)
3070                         VERIFY0(buf->b_efunc(buf->b_private));
3071 
3072                 buf->b_efunc = NULL;
3073                 buf->b_private = NULL;
3074                 kmem_cache_free(buf_cache, buf);
3075                 mutex_enter(&arc_user_evicts_lock);
3076         }
3077         mutex_exit(&arc_user_evicts_lock);
3078 }
3079 
3080 void
3081 arc_flush(spa_t *spa, boolean_t retry)
3082 {
3083         uint64_t guid = 0;
3084 
3085         /*
3086          * If retry is TRUE, a spa must not be specified since we have
3087          * no good way to determine if all of a spa's buffers have been
3088          * evicted from an arc state.
3089          */
3090         ASSERT(!retry || spa == 0);
3091 
3092         if (spa != NULL)
3093                 guid = spa_load_guid(spa);
3094 
3095         (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
3096         (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
3097 
3098         (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
3099         (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
3100 
3101         (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
3102         (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
3103 
3104         (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
3105         (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
3106 
3107         arc_do_user_evicts();
3108         ASSERT(spa || arc_eviction_list == NULL);
3109 }
3110 
3111 void
3112 arc_shrink(int64_t to_free)
3113 {
3114         if (arc_c > arc_c_min) {
3115 
3116                 if (arc_c > arc_c_min + to_free)
3117                         atomic_add_64(&arc_c, -to_free);
3118                 else
3119                         arc_c = arc_c_min;
3120 
3121                 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
3122                 if (arc_c > arc_size)
3123                         arc_c = MAX(arc_size, arc_c_min);
3124                 if (arc_p > arc_c)
3125                         arc_p = (arc_c >> 1);
3126                 ASSERT(arc_c >= arc_c_min);
3127                 ASSERT((int64_t)arc_p >= 0);
3128         }
3129 
3130         if (arc_size > arc_c)
3131                 (void) arc_adjust();
3132 }
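
/*
 * A simplified model of the clamping arc_shrink() performs on the target
 * size.  The names (cur_target, min_target, cur_size, to_free) are
 * hypothetical stand-ins for arc_c, arc_c_min, arc_size and the caller's
 * request; this sketch covers only the arc_c adjustment, not arc_p or the
 * follow-up arc_adjust() call:
 *
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	shrink_target(uint64_t cur_target, uint64_t min_target,
 *	    uint64_t cur_size, uint64_t to_free)
 *	{
 *		uint64_t c = cur_target;
 *
 *		if (c > min_target + to_free)
 *			c -= to_free;		// normal case
 *		else
 *			c = min_target;		// never go below the minimum
 *		if (c > cur_size) {
 *			// Don't leave the target above what is currently
 *			// cached (but still respect the minimum).
 *			c = (cur_size > min_target) ? cur_size : min_target;
 *		}
 *		return (c);
 *	}
 *
 * For example, shrinking a 10 GB target by 2 GB with a 1 GB minimum while
 * only 7 GB is cached yields 7 GB: the 8 GB intermediate value is capped
 * at the current cache size.
 */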
3133 
3134 typedef enum free_memory_reason_t {
3135         FMR_UNKNOWN,
3136         FMR_NEEDFREE,
3137         FMR_LOTSFREE,
3138         FMR_SWAPFS_MINFREE,
3139         FMR_PAGES_PP_MAXIMUM,
3140         FMR_HEAP_ARENA,
3141         FMR_ZIO_ARENA,
3142 } free_memory_reason_t;
3143 
3144 int64_t last_free_memory;
3145 free_memory_reason_t last_free_reason;
3146 
3147 /*
3148  * Additional reserve of pages for pp_reserve.
3149  */
3150 int64_t arc_pages_pp_reserve = 64;
3151 
3152 /*
3153  * Additional reserve of pages for swapfs.
3154  */
3155 int64_t arc_swapfs_reserve = 64;
3156 
3157 /*
3158  * Return the amount of memory that can be consumed before reclaim will be
3159  * needed.  A positive value means there is sufficient free memory; a
3160  * negative value indicates the amount of memory that needs to be freed up.
3161  */
3162 static int64_t
3163 arc_available_memory(void)
3164 {
3165         int64_t lowest = INT64_MAX;
3166         int64_t n;
3167         free_memory_reason_t r = FMR_UNKNOWN;
3168 
3169 #ifdef _KERNEL
3170         if (needfree > 0) {
3171                 n = PAGESIZE * (-needfree);
3172                 if (n < lowest) {
3173                         lowest = n;
3174                         r = FMR_NEEDFREE;
3175                 }
3176         }
3177 
3178         /*
3179          * check that we're out of range of the pageout scanner.  It starts to
3180          * schedule paging if freemem is less than lotsfree and needfree.
3181          * lotsfree is the high-water mark for pageout, and needfree is the
3182          * number of needed free pages.  We add extra pages here to make sure
3183          * the scanner doesn't start up while we're freeing memory.
3184          */
3185         n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
3186         if (n < lowest) {
3187                 lowest = n;
3188                 r = FMR_LOTSFREE;
3189         }
3190 
3191         /*
3192          * check to make sure that swapfs has enough space so that anon
3193          * reservations can still succeed. anon_resvmem() checks that the
3194          * availrmem is greater than swapfs_minfree, and the number of reserved
3195          * swap pages.  We also add a bit of extra here just to prevent
3196          * circumstances from getting really dire.
3197          */
3198         n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
3199             desfree - arc_swapfs_reserve);
3200         if (n < lowest) {
3201                 lowest = n;
3202                 r = FMR_SWAPFS_MINFREE;
3203         }
3204 
3205 
3206         /*
3207          * Check that we have enough availrmem that memory locking (e.g., via
3208          * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
3209          * stores the number of pages that cannot be locked; when availrmem
3210          * drops below pages_pp_maximum, page locking mechanisms such as
3211          * page_pp_lock() will fail.)
3212          */
3213         n = PAGESIZE * (availrmem - pages_pp_maximum -
3214             arc_pages_pp_reserve);
3215         if (n < lowest) {
3216                 lowest = n;
3217                 r = FMR_PAGES_PP_MAXIMUM;
3218         }
3219 
3220 #if defined(__i386)
3221         /*
3222          * If we're on an i386 platform, it's possible that we'll exhaust the
3223          * kernel heap space before we ever run out of available physical
3224          * memory.  Most checks of the size of the heap_area compare against
3225          * tune.t_minarmem, which is the minimum available real memory that we
3226          * can have in the system.  However, this is generally fixed at 25 pages
3227          * which is so low that it's useless.  In this comparison, we seek to
3228          * calculate the total heap-size, and reclaim if more than 3/4ths of the
3229          * heap is allocated.  (Or, in the calculation, if less than 1/4th is
3230          * free)
3231          */
3232         n = vmem_size(heap_arena, VMEM_FREE) -
3233             (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
3234         if (n < lowest) {
3235                 lowest = n;
3236                 r = FMR_HEAP_ARENA;
3237         }
3238 #endif
3239 
3240         /*
3241          * If zio data pages are being allocated out of a separate heap segment,
3242          * then enforce that the size of available vmem for this arena remains
3243          * above about 1/16th free.
3244          *
3245          * Note: The 1/16th arena free requirement was put in place
3246          * to aggressively evict memory from the arc in order to avoid
3247          * memory fragmentation issues.
3248          */
3249         if (zio_arena != NULL) {
3250                 n = vmem_size(zio_arena, VMEM_FREE) -
3251                     (vmem_size(zio_arena, VMEM_ALLOC) >> 4);
3252                 if (n < lowest) {
3253                         lowest = n;
3254                         r = FMR_ZIO_ARENA;
3255                 }
3256         }
3257 #else
3258         /* Every 100 calls, free a small amount */
3259         if (spa_get_random(100) == 0)
3260                 lowest = -1024;
3261 #endif
3262 
3263         last_free_memory = lowest;
3264         last_free_reason = r;
3265 
3266         return (lowest);
3267 }
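
/*
 * Every check in arc_available_memory() follows the same pattern: compute
 * the remaining headroom in bytes for one constraint, and remember the
 * smallest (most constrained) value together with the reason.  A minimal
 * userland sketch of that pattern, using a hypothetical constraint table
 * instead of the kernel globals consulted above:
 *
 *	#include <stdint.h>
 *
 *	typedef struct constraint {
 *		const char	*name;
 *		int64_t		headroom;	// bytes until this limit bites
 *	} constraint_t;
 *
 *	static int64_t
 *	lowest_headroom(const constraint_t *c, int n, const char **whyp)
 *	{
 *		int64_t lowest = INT64_MAX;
 *
 *		*whyp = "unknown";
 *		for (int i = 0; i < n; i++) {
 *			if (c[i].headroom < lowest) {
 *				lowest = c[i].headroom;
 *				*whyp = c[i].name;
 *			}
 *		}
 *		return (lowest);
 *	}
 *
 * A negative result means the tightest constraint has already been
 * violated, and its magnitude is roughly how much memory must be freed,
 * which is how a negative arc_available_memory() return value feeds the
 * arc_shrink() call in arc_reclaim_thread().
 */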
3268 
3269 
3270 /*
3271  * Determine if the system is under memory pressure and is asking
3272  * to reclaim memory. A return value of TRUE indicates that the system
3273  * is under memory pressure and that the arc should adjust accordingly.
3274  */
3275 static boolean_t
3276 arc_reclaim_needed(void)
3277 {
3278         return (arc_available_memory() < 0);
3279 }
3280 
3281 static void
3282 arc_kmem_reap_now(void)
3283 {
3284         size_t                  i;
3285         kmem_cache_t            *prev_cache = NULL;
3286         kmem_cache_t            *prev_data_cache = NULL;
3287         extern kmem_cache_t     *zio_buf_cache[];
3288         extern kmem_cache_t     *zio_data_buf_cache[];
3289         extern kmem_cache_t     *range_seg_cache;
3290 
3291 #ifdef _KERNEL
3292         if (arc_meta_used >= arc_meta_limit) {
3293                 /*
3294                  * We are exceeding our meta-data cache limit.
3295                  * Purge some DNLC entries to release holds on meta-data.
3296                  */
3297                 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
3298         }
3299 #if defined(__i386)
3300         /*
3301          * Reclaim unused memory from all kmem caches.
3302          */
3303         kmem_reap();
3304 #endif
3305 #endif
3306 
3307         for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
3308                 if (zio_buf_cache[i] != prev_cache) {
3309                         prev_cache = zio_buf_cache[i];
3310                         kmem_cache_reap_now(zio_buf_cache[i]);
3311                 }
3312                 if (zio_data_buf_cache[i] != prev_data_cache) {
3313                         prev_data_cache = zio_data_buf_cache[i];
3314                         kmem_cache_reap_now(zio_data_buf_cache[i]);
3315                 }
3316         }
3317         kmem_cache_reap_now(buf_cache);
3318         kmem_cache_reap_now(hdr_full_cache);
3319         kmem_cache_reap_now(hdr_l2only_cache);
3320         kmem_cache_reap_now(range_seg_cache);
3321 
3322         if (zio_arena != NULL) {
3323                 /*
3324                  * Ask the vmem arena to reclaim unused memory from its
3325                  * quantum caches.
3326                  */
3327                 vmem_qcache_reap(zio_arena);
3328         }
3329 }
3330 
3331 /*
3332  * Threads can block in arc_get_data_buf() waiting for this thread to evict
3333  * enough data and signal them to proceed. When this happens, the threads in
3334  * arc_get_data_buf() are sleeping while holding the hash lock for their
3335  * particular arc header. Thus, we must be careful to never sleep on a
3336  * hash lock in this thread. This is to prevent the following deadlock:
3337  *
3338  *  - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L",
3339  *    waiting for the reclaim thread to signal it.
3340  *
3341  *  - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
3342  *    fails, and goes to sleep forever.
3343  *
3344  * This possible deadlock is avoided by always acquiring a hash lock
3345  * using mutex_tryenter() from arc_reclaim_thread().
3346  */
3347 static void
3348 arc_reclaim_thread(void)
3349 {
3350         clock_t                 growtime = 0;
3351         callb_cpr_t             cpr;
3352 
3353         CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);
3354 
3355         mutex_enter(&arc_reclaim_lock);
3356         while (!arc_reclaim_thread_exit) {
3357                 int64_t free_memory = arc_available_memory();
3358                 uint64_t evicted = 0;
3359 
3360                 mutex_exit(&arc_reclaim_lock);
3361 
3362                 if (free_memory < 0) {
3363 
3364                         arc_no_grow = B_TRUE;
3365                         arc_warm = B_TRUE;
3366 
3367                         /*
3368                          * Wait at least arc_grow_retry (default 60) seconds
3369                          * before considering growing.
3370                          */
3371                         growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
3372 
3373                         arc_kmem_reap_now();
3374 
3375                         /*
3376                          * If we are still low on memory, shrink the ARC so
3377                          * that we have (arc_c >> arc_shrink_shift) bytes free.
3378                          */
3379                         free_memory = arc_available_memory();
3380 
3381                         int64_t to_free =
3382                             (arc_c >> arc_shrink_shift) - free_memory;
3383                         if (to_free > 0) {
3384 #ifdef _KERNEL
3385                                 to_free = MAX(to_free, ptob(needfree));
3386 #endif
3387                                 arc_shrink(to_free);
3388                         }
3389                 } else if (free_memory < arc_c >> arc_no_grow_shift) {
3390                         arc_no_grow = B_TRUE;
3391                 } else if (ddi_get_lbolt() >= growtime) {
3392                         arc_no_grow = B_FALSE;
3393                 }
3394 
3395                 evicted = arc_adjust();
3396 
3397                 mutex_enter(&arc_reclaim_lock);
3398 
3399                 /*
3400                  * If evicted is zero, we couldn't evict anything via
3401                  * arc_adjust(). This could be due to hash lock
3402                  * collisions, but more likely due to the majority of
3403                  * arc buffers being unevictable. Therefore, even if
3404                  * arc_size is above arc_c, another pass is unlikely to
3405                  * be helpful and could potentially cause us to enter an
3406                  * infinite loop.
3407                  */
3408                 if (arc_size <= arc_c || evicted == 0) {
3409                         /*
3410                          * We're either no longer overflowing, or we
3411                          * can't evict anything more, so we should wake
3412                          * up any threads before we go to sleep.
3413                          */
3414                         cv_broadcast(&arc_reclaim_waiters_cv);
3415 
3416                         /*
3417                          * Block until signaled, or after one second (we
3418                          * might need to perform arc_kmem_reap_now()
3419                          * even if we aren't being signaled).
3420                          */
3421                         CALLB_CPR_SAFE_BEGIN(&cpr);
3422                         (void) cv_timedwait(&arc_reclaim_thread_cv,
3423                             &arc_reclaim_lock, ddi_get_lbolt() + hz);
3424                         CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
3425                 }
3426         }
3427 
3428         arc_reclaim_thread_exit = FALSE;
3429         cv_broadcast(&arc_reclaim_thread_cv);
3430         CALLB_CPR_EXIT(&cpr);               /* drops arc_reclaim_lock */
3431         thread_exit();
3432 }
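
/*
 * The deadlock described in the comment above arc_reclaim_thread() comes
 * from blocking on a lock whose holder is itself waiting for this thread.
 * A minimal userland sketch of the defensive pattern (try the lock, count
 * a miss, and move on rather than blocking); reclaim_one(), evict() and
 * lock_misses are hypothetical names, not kernel interfaces:
 *
 *	#include <pthread.h>
 *	#include <stdint.h>
 *
 *	static uint64_t lock_misses;
 *
 *	// Returns the number of bytes reclaimed from one object, or 0 if
 *	// its lock could not be taken without blocking.
 *	static uint64_t
 *	reclaim_one(pthread_mutex_t *obj_lock, uint64_t (*evict)(void))
 *	{
 *		if (pthread_mutex_trylock(obj_lock) != 0) {
 *			// Somebody holds the lock; they may in turn be
 *			// waiting on this thread, so never block here.
 *			lock_misses++;
 *			return (0);
 *		}
 *		uint64_t freed = evict();
 *		pthread_mutex_unlock(obj_lock);
 *		return (freed);
 *	}
 */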
3433 
3434 static void
3435 arc_user_evicts_thread(void)
3436 {
3437         callb_cpr_t cpr;
3438 
3439         CALLB_CPR_INIT(&cpr, &arc_user_evicts_lock, callb_generic_cpr, FTAG);
3440 
3441         mutex_enter(&arc_user_evicts_lock);
3442         while (!arc_user_evicts_thread_exit) {
3443                 mutex_exit(&arc_user_evicts_lock);
3444 
3445                 arc_do_user_evicts();
3446 
3447                 /*
3448                  * This is necessary in order for the mdb ::arc dcmd to
3449                  * show up-to-date information. Since the ::arc command
3450                  * does not call the kstat's update function, without
3451                  * this call, the command may show stale stats for the
3452                  * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
3453                  * with this change, the data might be up to 1 second
3454                  * out of date; but that should suffice. The arc_state_t
3455                  * structures can be queried directly if more accurate
3456                  * information is needed.
3457                  */
3458                 if (arc_ksp != NULL)
3459                         arc_ksp->ks_update(arc_ksp, KSTAT_READ);
3460 
3461                 mutex_enter(&arc_user_evicts_lock);
3462 
3463                 /*
3464                  * Block until signaled, or after one second (we need to
3465                  * call the arc's kstat update function regularly).
3466                  */
3467                 CALLB_CPR_SAFE_BEGIN(&cpr);
3468                 (void) cv_timedwait(&arc_user_evicts_cv,
3469                     &arc_user_evicts_lock, ddi_get_lbolt() + hz);
3470                 CALLB_CPR_SAFE_END(&cpr, &arc_user_evicts_lock);
3471         }
3472 
3473         arc_user_evicts_thread_exit = FALSE;
3474         cv_broadcast(&arc_user_evicts_cv);
3475         CALLB_CPR_EXIT(&cpr);               /* drops arc_user_evicts_lock */
3476         thread_exit();
3477 }
3478 
3479 /*
3480  * Adapt arc info given the number of bytes we are trying to add and
3481  * the state that we are coming from.  This function is only called
3482  * when we are adding new content to the cache.
3483  */
3484 static void
3485 arc_adapt(int bytes, arc_state_t *state)
3486 {
3487         int mult;
3488         uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
3489         int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
3490         int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
3491 
3492         if (state == arc_l2c_only)
3493                 return;
3494 
3495         ASSERT(bytes > 0);
3496         /*
3497          * Adapt the target size of the MRU list:
3498          *      - if we just hit in the MRU ghost list, then increase
3499          *        the target size of the MRU list.
3500          *      - if we just hit in the MFU ghost list, then increase
3501          *        the target size of the MFU list by decreasing the
3502          *        target size of the MRU list.
3503          */
3504         if (state == arc_mru_ghost) {
3505                 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
3506                 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
3507 
3508                 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
3509         } else if (state == arc_mfu_ghost) {
3510                 uint64_t delta;
3511 
3512                 mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
3513                 mult = MIN(mult, 10);
3514 
3515                 delta = MIN(bytes * mult, arc_p);
3516                 arc_p = MAX(arc_p_min, arc_p - delta);
3517         }
3518         ASSERT((int64_t)arc_p >= 0);
3519 
3520         if (arc_reclaim_needed()) {
3521                 cv_signal(&arc_reclaim_thread_cv);
3522                 return;
3523         }
3524 
3525         if (arc_no_grow)
3526                 return;
3527 
3528         if (arc_c >= arc_c_max)
3529                 return;
3530 
3531         /*
3532          * If we're within (2 * maxblocksize) bytes of the target
3533          * cache size, increment the target cache size
3534          */
3535         if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
3536                 atomic_add_64(&arc_c, (int64_t)bytes);
3537                 if (arc_c > arc_c_max)
3538                         arc_c = arc_c_max;
3539                 else if (state == arc_anon)
3540                         atomic_add_64(&arc_p, (int64_t)bytes);
3541                 if (arc_p > arc_c)
3542                         arc_p = arc_c;
3543         }
3544         ASSERT((int64_t)arc_p >= 0);
3545 }
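
/*
 * A simplified model of the arc_p adjustment made in arc_adapt() on
 * ghost-list hits.  The helper is hypothetical (plain integers in place of
 * the arc state refcounts) and, like the caller, assumes the ghost list
 * that was hit is non-empty.  It shows the direction and magnitude of the
 * move: an MRU-ghost hit grows arc_p, an MFU-ghost hit shrinks it, and the
 * step is scaled by the ratio of the two ghost list sizes, capped at 10x:
 *
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	adapt_p(uint64_t p, uint64_t p_min, uint64_t c, uint64_t bytes,
 *	    int mru_ghost_hit, int64_t mrug_size, int64_t mfug_size)
 *	{
 *		int64_t mult;
 *
 *		if (mru_ghost_hit) {
 *			mult = (mrug_size >= mfug_size) ?
 *			    1 : (mfug_size / mrug_size);
 *			if (mult > 10)
 *				mult = 10;	// avoid wild swings
 *			p += bytes * mult;
 *			if (p > c - p_min)	// leave room for the MFU
 *				p = c - p_min;
 *		} else {
 *			uint64_t delta;
 *
 *			mult = (mfug_size >= mrug_size) ?
 *			    1 : (mrug_size / mfug_size);
 *			if (mult > 10)
 *				mult = 10;
 *			delta = bytes * mult;
 *			if (delta > p)
 *				delta = p;
 *			p -= delta;
 *			if (p < p_min)		// leave room for the MRU
 *				p = p_min;
 *		}
 *		return (p);
 *	}
 */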
3546 
3547 /*
3548  * Check if arc_size has grown past our upper threshold, determined by
3549  * zfs_arc_overflow_shift.
3550  */
3551 static boolean_t
3552 arc_is_overflowing(void)
3553 {
3554         /* Always allow at least one block of overflow */
3555         uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
3556             arc_c >> zfs_arc_overflow_shift);
3557 
3558         return (arc_size >= arc_c + overflow);
3559 }
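
/*
 * A minimal sketch of the overflow test above, with hypothetical names
 * (size, target, max_block, shift) in place of arc_size, arc_c,
 * SPA_MAXBLOCKSIZE and zfs_arc_overflow_shift.  The slack term means a
 * single new block never counts as "overflowing" on its own, while larger
 * caches are allowed proportionally more slack:
 *
 *	#include <stdint.h>
 *
 *	static int
 *	cache_is_overflowing(uint64_t size, uint64_t target,
 *	    uint64_t max_block, int shift)
 *	{
 *		uint64_t slack = target >> shift;
 *
 *		if (slack < max_block)
 *			slack = max_block;	// always allow one block
 *		return (size >= target + slack);
 *	}
 */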
3560 
3561 /*
3562  * The buffer, supplied as the first argument, needs a data block. If we
3563  * are hitting the hard limit for the cache size, we must sleep, waiting
3564  * for the eviction thread to catch up. If we're past the target size
3565  * but below the hard limit, we'll only signal the reclaim thread and
3566  * continue on.
3567  */
3568 static void
3569 arc_get_data_buf(arc_buf_t *buf)
3570 {
3571         arc_state_t             *state = buf->b_hdr->b_l1hdr.b_state;
3572         uint64_t                size = buf->b_hdr->b_size;
3573         arc_buf_contents_t      type = arc_buf_type(buf->b_hdr);
3574 
3575         arc_adapt(size, state);
3576 
3577         /*
3578          * If arc_size is currently overflowing, and has grown past our
3579          * upper limit, we must be adding data faster than the evict
3580          * thread can evict. Thus, to ensure we don't compound the
3581          * problem by adding more data and forcing arc_size to grow even
3582          * further past its target size, we halt and wait for the
3583          * eviction thread to catch up.
3584          *
3585          * It's also possible that the reclaim thread is unable to evict
3586          * enough buffers to get arc_size below the overflow limit (e.g.
3587          * due to buffers being un-evictable, or hash lock collisions).
3588          * In this case, we want to proceed regardless of whether we're
3589          * overflowing; thus we don't use a while loop here.
3590          */
3591         if (arc_is_overflowing()) {
3592                 mutex_enter(&arc_reclaim_lock);
3593 
3594                 /*
3595                  * Now that we've acquired the lock, we may no longer be
3596                  * over the overflow limit; let's check.
3597                  *
3598                  * We're ignoring the case of spurious wake ups. If that
3599                  * were to happen, it'd let this thread consume an ARC
3600                  * buffer before it should have (i.e. before we're under
3601                  * the overflow limit and were signalled by the reclaim
3602                  * thread). As long as that is a rare occurrence, it
3603                  * shouldn't cause any harm.
3604                  */
3605                 if (arc_is_overflowing()) {
3606                         cv_signal(&arc_reclaim_thread_cv);
3607                         cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock);
3608                 }
3609 
3610                 mutex_exit(&arc_reclaim_lock);
3611         }
3612 
3613         if (type == ARC_BUFC_METADATA) {
3614                 buf->b_data = zio_buf_alloc(size);
3615                 arc_space_consume(size, ARC_SPACE_META);
3616         } else {
3617                 ASSERT(type == ARC_BUFC_DATA);
3618                 buf->b_data = zio_data_buf_alloc(size);
3619                 arc_space_consume(size, ARC_SPACE_DATA);
3620         }
3621 
3622         /*
3623          * Update the state size.  Note that ghost states have a
3624          * "ghost size" and so don't need to be updated.
3625          */
3626         if (!GHOST_STATE(buf->b_hdr->b_l1hdr.b_state)) {
3627                 arc_buf_hdr_t *hdr = buf->b_hdr;
3628                 arc_state_t *state = hdr->b_l1hdr.b_state;
3629 
3630                 (void) refcount_add_many(&state->arcs_size, size, buf);
3631 
3632                 /*
3633                  * If this is reached via arc_read, the link is
3634                  * protected by the hash lock. If reached via
3635                  * arc_buf_alloc, the header should not be accessed by
3636                  * any other thread. And, if reached via arc_read_done,
3637                  * the hash lock will protect it if it's found in the
3638                  * hash table; otherwise no other thread should be
3639                  * trying to [add|remove]_reference it.
3640                  */
3641                 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
3642                         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3643                         atomic_add_64(&hdr->b_l1hdr.b_state->arcs_lsize[type],
3644                             size);
3645                 }
3646                 /*
3647                  * If we are growing the cache, and we are adding anonymous
3648                  * data, and we have outgrown arc_p, update arc_p
3649                  */
3650                 if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
3651                     (refcount_count(&arc_anon->arcs_size) +
3652                     refcount_count(&arc_mru->arcs_size) > arc_p))
3653                         arc_p = MIN(arc_c, arc_p + size);
3654         }
3655 }
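
/*
 * Seen from the allocating side, the throttle in arc_get_data_buf() above
 * reduces to: if the cache is past its overflow limit, nudge the reclaimer
 * and wait once (not in a loop) for it to signal that space was made.  A
 * minimal userland sketch with hypothetical names (throttle_lock, space_cv,
 * reclaim_cv, over_limit) rather than the ARC's locks and condition
 * variables:
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t throttle_lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t space_cv = PTHREAD_COND_INITIALIZER;
 *	static pthread_cond_t reclaim_cv = PTHREAD_COND_INITIALIZER;
 *
 *	extern int over_limit(void);	// hypothetical overflow check
 *
 *	static void
 *	throttle_allocation(void)
 *	{
 *		if (!over_limit())
 *			return;
 *		pthread_mutex_lock(&throttle_lock);
 *		// Re-check under the lock; the reclaimer may have caught
 *		// up while we were acquiring it.
 *		if (over_limit()) {
 *			pthread_cond_signal(&reclaim_cv);
 *			// Wait once only: the reclaimer may be unable to
 *			// free enough, and the caller must still proceed.
 *			pthread_cond_wait(&space_cv, &throttle_lock);
 *		}
 *		pthread_mutex_unlock(&throttle_lock);
 *	}
 */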
3656 
3657 /*
3658  * This routine is called whenever a buffer is accessed.
3659  * NOTE: the hash lock is dropped in this function.
3660  */
3661 static void
3662 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
3663 {
3664         clock_t now;
3665 
3666         ASSERT(MUTEX_HELD(hash_lock));
3667         ASSERT(HDR_HAS_L1HDR(hdr));
3668 
3669         if (hdr->b_l1hdr.b_state == arc_anon) {
3670                 /*
3671                  * This buffer is not in the cache, and does not
3672                  * appear in our "ghost" list.  Add the new buffer
3673                  * to the MRU state.
3674                  */
3675 
3676                 ASSERT0(hdr->b_l1hdr.b_arc_access);
3677                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3678                 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
3679                 arc_change_state(arc_mru, hdr, hash_lock);
3680 
3681         } else if (hdr->b_l1hdr.b_state == arc_mru) {
3682                 now = ddi_get_lbolt();
3683 
3684                 /*
3685                  * If this buffer is here because of a prefetch, then either:
3686                  * - clear the flag if this is a "referencing" read
3687                  *   (any subsequent access will bump this into the MFU state).
3688                  * or
3689                  * - move the buffer to the head of the list if this is
3690                  *   another prefetch (to make it less likely to be evicted).
3691                  */
3692                 if (HDR_PREFETCH(hdr)) {
3693                         if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
3694                                 /* link protected by hash lock */
3695                                 ASSERT(multilist_link_active(
3696                                     &hdr->b_l1hdr.b_arc_node));
3697                         } else {
3698                                 hdr->b_flags &= ~ARC_FLAG_PREFETCH;
3699                                 ARCSTAT_BUMP(arcstat_mru_hits);
3700                         }
3701                         hdr->b_l1hdr.b_arc_access = now;
3702                         return;
3703                 }
3704 
3705                 /*
3706                  * This buffer has been "accessed" only once so far,
3707                  * but it is still in the cache. Move it to the MFU
3708                  * state.
3709                  */
3710                 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) {
3711                         /*
3712                          * More than 125ms have passed since we
3713                          * instantiated this buffer.  Move it to the
3714                          * most frequently used state.
3715                          */
3716                         hdr->b_l1hdr.b_arc_access = now;
3717                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3718                         arc_change_state(arc_mfu, hdr, hash_lock);
3719                 }
3720                 ARCSTAT_BUMP(arcstat_mru_hits);
3721         } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
3722                 arc_state_t     *new_state;
3723                 /*
3724                  * This buffer has been "accessed" recently, but
3725                  * was evicted from the cache.  Move it to the
3726                  * MFU state.
3727                  */
3728 
3729                 if (HDR_PREFETCH(hdr)) {
3730                         new_state = arc_mru;
3731                         if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
3732                                 hdr->b_flags &= ~ARC_FLAG_PREFETCH;
3733                         DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
3734                 } else {
3735                         new_state = arc_mfu;
3736                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3737                 }
3738 
3739                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3740                 arc_change_state(new_state, hdr, hash_lock);
3741 
3742                 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
3743         } else if (hdr->b_l1hdr.b_state == arc_mfu) {
3744                 /*
3745                  * This buffer has been accessed more than once and is
3746                  * still in the cache.  Keep it in the MFU state.
3747                  *
3748                  * NOTE: an add_reference() that occurred when we did
3749                  * the arc_read() will have kicked this off the list.
3750                  * If it was a prefetch, we will explicitly move it to
3751                  * the head of the list now.
3752                  */
3753                 if ((HDR_PREFETCH(hdr)) != 0) {
3754                         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3755                         /* link protected by hash_lock */
3756                         ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
3757                 }
3758                 ARCSTAT_BUMP(arcstat_mfu_hits);
3759                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3760         } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
3761                 arc_state_t     *new_state = arc_mfu;
3762                 /*
3763                  * This buffer has been accessed more than once but has
3764                  * been evicted from the cache.  Move it back to the
3765                  * MFU state.
3766                  */
3767 
3768                 if (HDR_PREFETCH(hdr)) {
3769                         /*
3770                          * This is a prefetch access...
3771                          * move this block back to the MRU state.
3772                          */
3773                         ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
3774                         new_state = arc_mru;
3775                 }
3776 
3777                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3778                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3779                 arc_change_state(new_state, hdr, hash_lock);
3780 
3781                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
3782         } else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
3783                 /*
3784                  * This buffer is on the 2nd Level ARC.
3785                  */
3786 
3787                 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3788                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3789                 arc_change_state(arc_mfu, hdr, hash_lock);
3790         } else {
3791                 ASSERT(!"invalid arc state");
3792         }
3793 }
3794 
3795 /* a generic arc_done_func_t which you can use */
3796 /* ARGSUSED */
3797 void
3798 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
3799 {
3800         if (zio == NULL || zio->io_error == 0)
3801                 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
3802         VERIFY(arc_buf_remove_ref(buf, arg));
3803 }
3804 
3805 /* a generic arc_done_func_t */
3806 void
3807 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
3808 {
3809         arc_buf_t **bufp = arg;
3810         if (zio && zio->io_error) {
3811                 VERIFY(arc_buf_remove_ref(buf, arg));
3812                 *bufp = NULL;
3813         } else {
3814                 *bufp = buf;
3815                 ASSERT(buf->b_data);
3816         }
3817 }
3818 
3819 static void
3820 arc_read_done(zio_t *zio)
3821 {
3822         arc_buf_hdr_t   *hdr;
3823         arc_buf_t       *buf;
3824         arc_buf_t       *abuf;  /* buffer we're assigning to callback */
3825         kmutex_t        *hash_lock = NULL;
3826         arc_callback_t  *callback_list, *acb;
3827         int             freeable = FALSE;
3828 
3829         buf = zio->io_private;
3830         hdr = buf->b_hdr;
3831 
3832         /*
3833          * The hdr was inserted into hash-table and removed from lists
3834          * prior to starting I/O.  We should find this header, since
3835          * it's in the hash table, and it should be legit since it's
3836          * not possible to evict it during the I/O.  The only possible
3837          * reason for it not to be found is if we were freed during the
3838          * read.
3839          */
3840         if (HDR_IN_HASH_TABLE(hdr)) {
3841                 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
3842                 ASSERT3U(hdr->b_dva.dva_word[0], ==,
3843                     BP_IDENTITY(zio->io_bp)->dva_word[0]);
3844                 ASSERT3U(hdr->b_dva.dva_word[1], ==,
3845                     BP_IDENTITY(zio->io_bp)->dva_word[1]);
3846 
3847                 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
3848                     &hash_lock);
3849 
3850                 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
3851                     hash_lock == NULL) ||
3852                     (found == hdr &&
3853                     DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
3854                     (found == hdr && HDR_L2_READING(hdr)));
3855         }
3856 
3857         hdr->b_flags &= ~ARC_FLAG_L2_EVICTED;
3858         if (l2arc_noprefetch && HDR_PREFETCH(hdr))
3859                 hdr->b_flags &= ~ARC_FLAG_L2CACHE;
3860 
3861         /* byteswap if necessary */
3862         callback_list = hdr->b_l1hdr.b_acb;
3863         ASSERT(callback_list != NULL);
3864         if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
3865                 dmu_object_byteswap_t bswap =
3866                     DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
3867                 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
3868                     byteswap_uint64_array :
3869                     dmu_ot_byteswap[bswap].ob_func;
3870                 func(buf->b_data, hdr->b_size);
3871         }
3872 
3873         arc_cksum_compute(buf, B_FALSE);
3874         arc_buf_watch(buf);
3875 
3876         if (hash_lock && zio->io_error == 0 &&
3877             hdr->b_l1hdr.b_state == arc_anon) {
3878                 /*
3879                  * Only call arc_access on anonymous buffers.  This is because
3880                  * if we've issued an I/O for an evicted buffer, we've already
3881                  * called arc_access (to prevent any simultaneous readers from
3882                  * getting confused).
3883                  */
3884                 arc_access(hdr, hash_lock);
3885         }
3886 
3887         /* create copies of the data buffer for the callers */
3888         abuf = buf;
3889         for (acb = callback_list; acb; acb = acb->acb_next) {
3890                 if (acb->acb_done) {
3891                         if (abuf == NULL) {
3892                                 ARCSTAT_BUMP(arcstat_duplicate_reads);
3893                                 abuf = arc_buf_clone(buf);
3894                         }
3895                         acb->acb_buf = abuf;
3896                         abuf = NULL;
3897                 }
3898         }
3899         hdr->b_l1hdr.b_acb = NULL;
3900         hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
3901         ASSERT(!HDR_BUF_AVAILABLE(hdr));
3902         if (abuf == buf) {
3903                 ASSERT(buf->b_efunc == NULL);
3904                 ASSERT(hdr->b_l1hdr.b_datacnt == 1);
3905                 hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
3906         }
3907 
3908         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
3909             callback_list != NULL);
3910 
3911         if (zio->io_error != 0) {
3912                 hdr->b_flags |= ARC_FLAG_IO_ERROR;
3913                 if (hdr->b_l1hdr.b_state != arc_anon)
3914                         arc_change_state(arc_anon, hdr, hash_lock);
3915                 if (HDR_IN_HASH_TABLE(hdr))
3916                         buf_hash_remove(hdr);
3917                 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
3918         }
3919 
3920         /*
3921          * Broadcast before we drop the hash_lock to avoid the possibility
3922          * that the hdr (and hence the cv) might be freed before we get to
3923          * the cv_broadcast().
3924          */
3925         cv_broadcast(&hdr->b_l1hdr.b_cv);
3926 
3927         if (hash_lock != NULL) {
3928                 mutex_exit(hash_lock);
3929         } else {
3930                 /*
3931                  * This block was freed while we waited for the read to
3932                  * complete.  It has been removed from the hash table and
3933                  * moved to the anonymous state (so that it won't show up
3934                  * in the cache).
3935                  */
3936                 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
3937                 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
3938         }
3939 
3940         /* execute each callback and free its structure */
3941         while ((acb = callback_list) != NULL) {
3942                 if (acb->acb_done)
3943                         acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3944 
3945                 if (acb->acb_zio_dummy != NULL) {
3946                         acb->acb_zio_dummy->io_error = zio->io_error;
3947                         zio_nowait(acb->acb_zio_dummy);
3948                 }
3949 
3950                 callback_list = acb->acb_next;
3951                 kmem_free(acb, sizeof (arc_callback_t));
3952         }
3953 
3954         if (freeable)
3955                 arc_hdr_destroy(hdr);
3956 }
3957 
3958 /*
3959  * "Read" the block at the specified DVA (in bp) via the
3960  * cache.  If the block is found in the cache, invoke the provided
3961  * callback immediately and return.  Note that the `zio' parameter
3962  * in the callback will be NULL in this case, since no IO was
3963  * required.  If the block is not in the cache pass the read request
3964  * on to the spa with a substitute callback function, so that the
3965  * requested block will be added to the cache.
3966  *
3967  * If a read request arrives for a block that has a read in-progress,
3968  * either wait for the in-progress read to complete (and return the
3969  * results); or, if this is a read with a "done" func, add a record
3970  * to the read to invoke the "done" func when the read completes,
3971  * and return; or just return.
3972  *
3973  * arc_read_done() will invoke all the requested "done" functions
3974  * for readers of this block.
3975  */
3976 int
3977 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3978     void *private, zio_priority_t priority, int zio_flags,
3979     arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
3980 {
3981         arc_buf_hdr_t *hdr = NULL;
3982         arc_buf_t *buf = NULL;
3983         kmutex_t *hash_lock = NULL;
3984         zio_t *rzio;
3985         uint64_t guid = spa_load_guid(spa);
3986 
3987         ASSERT(!BP_IS_EMBEDDED(bp) ||
3988             BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
3989 
3990 top:
3991         if (!BP_IS_EMBEDDED(bp)) {
3992                 /*
3993                  * Embedded BP's have no DVA, are never in the hash table,
3994                  * and require no I/O to "read"; an anonymous arc buf backs them.
3995                  */
3996                 hdr = buf_hash_find(guid, bp, &hash_lock);
3997         }
3998 
3999         if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_datacnt > 0) {
4000 
4001                 *arc_flags |= ARC_FLAG_CACHED;
4002 
4003                 if (HDR_IO_IN_PROGRESS(hdr)) {
4004 
4005                         if (*arc_flags & ARC_FLAG_WAIT) {
4006                                 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
4007                                 mutex_exit(hash_lock);
4008                                 goto top;
4009                         }
4010                         ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
4011 
4012                         if (done) {
4013                                 arc_callback_t  *acb = NULL;
4014 
4015                                 acb = kmem_zalloc(sizeof (arc_callback_t),
4016                                     KM_SLEEP);
4017                                 acb->acb_done = done;
4018                                 acb->acb_private = private;
4019                                 if (pio != NULL)
4020                                         acb->acb_zio_dummy = zio_null(pio,
4021                                             spa, NULL, NULL, NULL, zio_flags);
4022 
4023                                 ASSERT(acb->acb_done != NULL);
4024                                 acb->acb_next = hdr->b_l1hdr.b_acb;
4025                                 hdr->b_l1hdr.b_acb = acb;
4026                                 add_reference(hdr, hash_lock, private);
4027                                 mutex_exit(hash_lock);
4028                                 return (0);
4029                         }
4030                         mutex_exit(hash_lock);
4031                         return (0);
4032                 }
4033 
4034                 ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
4035                     hdr->b_l1hdr.b_state == arc_mfu);
4036 
4037                 if (done) {
4038                         add_reference(hdr, hash_lock, private);
4039                         /*
4040                          * If this block is already in use, create a new
4041                          * copy of the data so that we will be guaranteed
4042                          * that arc_release() will always succeed.
4043                          */
4044                         buf = hdr->b_l1hdr.b_buf;
4045                         ASSERT(buf);
4046                         ASSERT(buf->b_data);
4047                         if (HDR_BUF_AVAILABLE(hdr)) {
4048                                 ASSERT(buf->b_efunc == NULL);
4049                                 hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
4050                         } else {
4051                                 buf = arc_buf_clone(buf);
4052                         }
4053 
4054                 } else if (*arc_flags & ARC_FLAG_PREFETCH &&
4055                     refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
4056                         hdr->b_flags |= ARC_FLAG_PREFETCH;
4057                 }
4058                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
4059                 arc_access(hdr, hash_lock);
4060                 if (*arc_flags & ARC_FLAG_L2CACHE)
4061                         hdr->b_flags |= ARC_FLAG_L2CACHE;
4062                 if (*arc_flags & ARC_FLAG_L2COMPRESS)
4063                         hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4064                 mutex_exit(hash_lock);
4065                 ARCSTAT_BUMP(arcstat_hits);
4066                 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
4067                     demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
4068                     data, metadata, hits);
4069 
4070                 if (done)
4071                         done(NULL, buf, private);
4072         } else {
4073                 uint64_t size = BP_GET_LSIZE(bp);
4074                 arc_callback_t *acb;
4075                 vdev_t *vd = NULL;
4076                 uint64_t addr = 0;
4077                 boolean_t devw = B_FALSE;
4078                 enum zio_compress b_compress = ZIO_COMPRESS_OFF;
4079                 int32_t b_asize = 0;
4080 
4081                 if (hdr == NULL) {
4082                         /* this block is not in the cache */
4083                         arc_buf_hdr_t *exists = NULL;
4084                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
4085                         buf = arc_buf_alloc(spa, size, private, type);
4086                         hdr = buf->b_hdr;
4087                         if (!BP_IS_EMBEDDED(bp)) {
4088                                 hdr->b_dva = *BP_IDENTITY(bp);
4089                                 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
4090                                 exists = buf_hash_insert(hdr, &hash_lock);
4091                         }
4092                         if (exists != NULL) {
4093                                 /* somebody beat us to the hash insert */
4094                                 mutex_exit(hash_lock);
4095                                 buf_discard_identity(hdr);
4096                                 (void) arc_buf_remove_ref(buf, private);
4097                                 goto top; /* restart the IO request */
4098                         }
4099 
4100                         /* if this is a prefetch, we don't have a reference */
4101                         if (*arc_flags & ARC_FLAG_PREFETCH) {
4102                                 (void) remove_reference(hdr, hash_lock,
4103                                     private);
4104                                 hdr->b_flags |= ARC_FLAG_PREFETCH;
4105                         }
4106                         if (*arc_flags & ARC_FLAG_L2CACHE)
4107                                 hdr->b_flags |= ARC_FLAG_L2CACHE;
4108                         if (*arc_flags & ARC_FLAG_L2COMPRESS)
4109                                 hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4110                         if (BP_GET_LEVEL(bp) > 0)
4111                                 hdr->b_flags |= ARC_FLAG_INDIRECT;
4112                 } else {
4113                         /*
4114                          * This block is in the ghost cache. If it was L2-only
4115                          * (and thus didn't have an L1 hdr), we realloc the
4116                          * header to add an L1 hdr.
4117                          */
4118                         if (!HDR_HAS_L1HDR(hdr)) {
4119                                 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
4120                                     hdr_full_cache);
4121                         }
4122 
4123                         ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
4124                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4125                         ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4126                         ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
4127 
4128                         /* if this is a prefetch, we don't have a reference */
4129                         if (*arc_flags & ARC_FLAG_PREFETCH)
4130                                 hdr->b_flags |= ARC_FLAG_PREFETCH;
4131                         else
4132                                 add_reference(hdr, hash_lock, private);
4133                         if (*arc_flags & ARC_FLAG_L2CACHE)
4134                                 hdr->b_flags |= ARC_FLAG_L2CACHE;
4135                         if (*arc_flags & ARC_FLAG_L2COMPRESS)
4136                                 hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4137                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
4138                         buf->b_hdr = hdr;
4139                         buf->b_data = NULL;
4140                         buf->b_efunc = NULL;
4141                         buf->b_private = NULL;
4142                         buf->b_next = NULL;
4143                         hdr->b_l1hdr.b_buf = buf;
4144                         ASSERT0(hdr->b_l1hdr.b_datacnt);
4145                         hdr->b_l1hdr.b_datacnt = 1;
4146                         arc_get_data_buf(buf);
4147                         arc_access(hdr, hash_lock);
4148                 }
4149 
4150                 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
4151 
4152                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
4153                 acb->acb_done = done;
4154                 acb->acb_private = private;
4155 
4156                 ASSERT(hdr->b_l1hdr.b_acb == NULL);
4157                 hdr->b_l1hdr.b_acb = acb;
4158                 hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
4159 
4160                 if (HDR_HAS_L2HDR(hdr) &&
4161                     (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
4162                         devw = hdr->b_l2hdr.b_dev->l2ad_writing;
4163                         addr = hdr->b_l2hdr.b_daddr;
4164                         b_compress = hdr->b_l2hdr.b_compress;
4165                         b_asize = hdr->b_l2hdr.b_asize;
4166                         /*
4167                          * Lock out device removal.
4168                          */
4169                         if (vdev_is_dead(vd) ||
4170                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
4171                                 vd = NULL;
4172                 }
4173 
4174                 if (hash_lock != NULL)
4175                         mutex_exit(hash_lock);
4176 
4177                 /*
4178                  * At this point, we have a level 1 cache miss.  Try again in
4179                  * L2ARC if possible.
4180                  */
4181                 ASSERT3U(hdr->b_size, ==, size);
4182                 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
4183                     uint64_t, size, zbookmark_phys_t *, zb);
4184                 ARCSTAT_BUMP(arcstat_misses);
4185                 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
4186                     demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
4187                     data, metadata, misses);
4188 
4189                 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
4190                         /*
4191                          * Read from the L2ARC if the following are true:
4192                          * 1. The L2ARC vdev was previously cached.
4193                          * 2. This buffer still has L2ARC metadata.
4194                          * 3. This buffer isn't currently writing to the L2ARC.
4195                          * 4. The L2ARC entry wasn't evicted, which may
4196                          *    also have invalidated the vdev.
4197                          * 5. This isn't a prefetch while l2arc_noprefetch is set.
4198                          */
4199                         if (HDR_HAS_L2HDR(hdr) &&
4200                             !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
4201                             !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
4202                                 l2arc_read_callback_t *cb;
4203 
4204                                 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
4205                                 ARCSTAT_BUMP(arcstat_l2_hits);
4206 
4207                                 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
4208                                     KM_SLEEP);
4209                                 cb->l2rcb_buf = buf;
4210                                 cb->l2rcb_spa = spa;
4211                                 cb->l2rcb_bp = *bp;
4212                                 cb->l2rcb_zb = *zb;
4213                                 cb->l2rcb_flags = zio_flags;
4214                                 cb->l2rcb_compress = b_compress;
4215 
4216                                 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
4217                                     addr + size < vd->vdev_psize -
4218                                     VDEV_LABEL_END_SIZE);
4219 
4220                                 /*
4221                                  * l2arc read.  The SCL_L2ARC lock will be
4222                                  * released by l2arc_read_done().
4223                                  * Issue a null zio if the underlying buffer
4224                                  * was squashed to zero size by compression.
4225                                  */
4226                                 if (b_compress == ZIO_COMPRESS_EMPTY) {
4227                                         rzio = zio_null(pio, spa, vd,
4228                                             l2arc_read_done, cb,
4229                                             zio_flags | ZIO_FLAG_DONT_CACHE |
4230                                             ZIO_FLAG_CANFAIL |
4231                                             ZIO_FLAG_DONT_PROPAGATE |
4232                                             ZIO_FLAG_DONT_RETRY);
4233                                 } else {
4234                                         rzio = zio_read_phys(pio, vd, addr,
4235                                             b_asize, buf->b_data,
4236                                             ZIO_CHECKSUM_OFF,
4237                                             l2arc_read_done, cb, priority,
4238                                             zio_flags | ZIO_FLAG_DONT_CACHE |
4239                                             ZIO_FLAG_CANFAIL |
4240                                             ZIO_FLAG_DONT_PROPAGATE |
4241                                             ZIO_FLAG_DONT_RETRY, B_FALSE);
4242                                 }
4243                                 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
4244                                     zio_t *, rzio);
4245                                 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
4246 
4247                                 if (*arc_flags & ARC_FLAG_NOWAIT) {
4248                                         zio_nowait(rzio);
4249                                         return (0);
4250                                 }
4251 
4252                                 ASSERT(*arc_flags & ARC_FLAG_WAIT);
4253                                 if (zio_wait(rzio) == 0)
4254                                         return (0);
4255 
4256                                 /* l2arc read error; goto zio_read() */
4257                         } else {
4258                                 DTRACE_PROBE1(l2arc__miss,
4259                                     arc_buf_hdr_t *, hdr);
4260                                 ARCSTAT_BUMP(arcstat_l2_misses);
4261                                 if (HDR_L2_WRITING(hdr))
4262                                         ARCSTAT_BUMP(arcstat_l2_rw_clash);
4263                                 spa_config_exit(spa, SCL_L2ARC, vd);
4264                         }
4265                 } else {
4266                         if (vd != NULL)
4267                                 spa_config_exit(spa, SCL_L2ARC, vd);
4268                         if (l2arc_ndev != 0) {
4269                                 DTRACE_PROBE1(l2arc__miss,
4270                                     arc_buf_hdr_t *, hdr);
4271                                 ARCSTAT_BUMP(arcstat_l2_misses);
4272                         }
4273                 }
4274 
4275                 rzio = zio_read(pio, spa, bp, buf->b_data, size,
4276                     arc_read_done, buf, priority, zio_flags, zb);
4277 
4278                 if (*arc_flags & ARC_FLAG_WAIT)
4279                         return (zio_wait(rzio));
4280 
4281                 ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
4282                 zio_nowait(rzio);
4283         }
4284         return (0);
4285 }
4286 
4287 void
4288 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
4289 {
4290         ASSERT(buf->b_hdr != NULL);
4291         ASSERT(buf->b_hdr->b_l1hdr.b_state != arc_anon);
4292         ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt) ||
4293             func == NULL);
4294         ASSERT(buf->b_efunc == NULL);
4295         ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
4296 
4297         buf->b_efunc = func;
4298         buf->b_private = private;
4299 }
4300 
4301 /*
4302  * Notify the arc that a block was freed, and thus will never be used again.
4303  */
4304 void
4305 arc_freed(spa_t *spa, const blkptr_t *bp)
4306 {
4307         arc_buf_hdr_t *hdr;
4308         kmutex_t *hash_lock;
4309         uint64_t guid = spa_load_guid(spa);
4310 
4311         ASSERT(!BP_IS_EMBEDDED(bp));
4312 
4313         hdr = buf_hash_find(guid, bp, &hash_lock);
4314         if (hdr == NULL)
4315                 return;
4316         if (HDR_BUF_AVAILABLE(hdr)) {
4317                 arc_buf_t *buf = hdr->b_l1hdr.b_buf;
4318                 add_reference(hdr, hash_lock, FTAG);
4319                 hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
4320                 mutex_exit(hash_lock);
4321 
4322                 arc_release(buf, FTAG);
4323                 (void) arc_buf_remove_ref(buf, FTAG);
4324         } else {
4325                 mutex_exit(hash_lock);
4326         }
4327 
4328 }
4329 
4330 /*
4331  * Clear the user eviction callback set by arc_set_callback(), first calling
4332  * it if it exists.  Because the presence of a callback keeps an arc_buf cached,
4333  * clearing the callback may result in the arc_buf being destroyed.  However,
4334  * it will not result in the *last* arc_buf being destroyed, hence the data
4335  * will remain cached in the ARC. We make a copy of the arc buffer here so
4336  * that we can process the callback without holding any locks.
4337  *
4338  * It's possible that the callback is already in the process of being cleared
4339  * by another thread.  In this case we can not clear the callback.
4340  *
4341  * Returns B_TRUE if the callback was successfully called and cleared.
4342  */
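     /*
      * A hypothetical usage sketch (the callback and argument names below are
      * illustrative only, not from this file): a consumer that earlier
      * registered an eviction callback with
      *
      *	arc_set_callback(buf, my_evict_func, my_private);
      *
      * can later withdraw it with
      *
      *	if (!arc_clear_callback(buf))
      *		... another thread is already clearing it ...
      *
      * where, per the return value described above, B_TRUE means
      * my_evict_func(my_private) was invoked here.
      */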
4343 boolean_t
4344 arc_clear_callback(arc_buf_t *buf)
4345 {
4346         arc_buf_hdr_t *hdr;
4347         kmutex_t *hash_lock;
4348         arc_evict_func_t *efunc = buf->b_efunc;
4349         void *private = buf->b_private;
4350 
4351         mutex_enter(&buf->b_evict_lock);
4352         hdr = buf->b_hdr;
4353         if (hdr == NULL) {
4354                 /*
4355                  * We are in arc_do_user_evicts().
4356                  */
4357                 ASSERT(buf->b_data == NULL);
4358                 mutex_exit(&buf->b_evict_lock);
4359                 return (B_FALSE);
4360         } else if (buf->b_data == NULL) {
4361                 /*
4362                  * We are on the eviction list; process this buffer now
4363                  * but let arc_do_user_evicts() do the reaping.
4364                  */
4365                 buf->b_efunc = NULL;
4366                 mutex_exit(&buf->b_evict_lock);
4367                 VERIFY0(efunc(private));
4368                 return (B_TRUE);
4369         }
4370         hash_lock = HDR_LOCK(hdr);
4371         mutex_enter(hash_lock);
4372         hdr = buf->b_hdr;
4373         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4374 
4375         ASSERT3U(refcount_count(&hdr->b_l1hdr.b_refcnt), <,
4376             hdr->b_l1hdr.b_datacnt);
4377         ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
4378             hdr->b_l1hdr.b_state == arc_mfu);
4379 
4380         buf->b_efunc = NULL;
4381         buf->b_private = NULL;
4382 
4383         if (hdr->b_l1hdr.b_datacnt > 1) {
4384                 mutex_exit(&buf->b_evict_lock);
4385                 arc_buf_destroy(buf, TRUE);
4386         } else {
4387                 ASSERT(buf == hdr->b_l1hdr.b_buf);
4388                 hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
4389                 mutex_exit(&buf->b_evict_lock);
4390         }
4391 
4392         mutex_exit(hash_lock);
4393         VERIFY0(efunc(private));
4394         return (B_TRUE);
4395 }
4396 
4397 /*
4398  * Release this buffer from the cache, making it an anonymous buffer.  This
4399  * must be done after a read and prior to modifying the buffer contents.
4400  * If the buffer has more than one reference, we must make
4401  * a new hdr for the buffer.
4402  */
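     /*
      * An illustrative (hypothetical) sketch of the pattern described above;
      * "tag" and "new_data" are placeholders, not names from this file:
      *
      *	arc_buf_t *buf = ...;			(obtained via arc_read())
      *	arc_release(buf, tag);			(detach from the cache)
      *	ASSERT(arc_released(buf));
      *	bcopy(new_data, buf->b_data, buf->b_hdr->b_size);
      *
      * After arc_release() the buffer is anonymous (no longer tied to its
      * on-disk identity) and is safe to modify in place.
      */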
4403 void
4404 arc_release(arc_buf_t *buf, void *tag)
4405 {
4406         arc_buf_hdr_t *hdr = buf->b_hdr;
4407 
4408         /*
4409          * It would be nice to assert that if it's DMU metadata (level >
4410          * 0 || it's the dnode file), then it must be syncing context.
4411          * But we don't know that information at this level.
4412          */
4413 
4414         mutex_enter(&buf->b_evict_lock);
4415 
4416         ASSERT(HDR_HAS_L1HDR(hdr));
4417 
4418         /*
4419          * We don't grab the hash lock prior to this check, because if
4420          * the buffer's header is in the arc_anon state, it won't be
4421          * linked into the hash table.
4422          */
4423         if (hdr->b_l1hdr.b_state == arc_anon) {
4424                 mutex_exit(&buf->b_evict_lock);
4425                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4426                 ASSERT(!HDR_IN_HASH_TABLE(hdr));
4427                 ASSERT(!HDR_HAS_L2HDR(hdr));
4428                 ASSERT(BUF_EMPTY(hdr));
4429 
4430                 ASSERT3U(hdr->b_l1hdr.b_datacnt, ==, 1);
4431                 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
4432                 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
4433 
4434                 ASSERT3P(buf->b_efunc, ==, NULL);
4435                 ASSERT3P(buf->b_private, ==, NULL);
4436 
4437                 hdr->b_l1hdr.b_arc_access = 0;
4438                 arc_buf_thaw(buf);
4439 
4440                 return;
4441         }
4442 
4443         kmutex_t *hash_lock = HDR_LOCK(hdr);
4444         mutex_enter(hash_lock);
4445 
4446         /*
4447          * This assignment is only valid as long as the hash_lock is
4448          * held; we must be careful not to reference state or the
4449          * b_state field after dropping the lock.
4450          */
4451         arc_state_t *state = hdr->b_l1hdr.b_state;
4452         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4453         ASSERT3P(state, !=, arc_anon);
4454 
4455         /* this buffer is not on any list */
4456         ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) > 0);
4457 
4458         if (HDR_HAS_L2HDR(hdr)) {
4459                 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
4460 
4461                 /*
4462                  * We have to recheck this conditional again now that
4463                  * we're holding the l2ad_mtx to prevent a race with
4464                  * another thread which might be concurrently calling
4465                  * l2arc_evict(). In that case, l2arc_evict() might have
4466                  * destroyed the header's L2 portion as we were waiting
4467                  * to acquire the l2ad_mtx.
4468                  */
4469                 if (HDR_HAS_L2HDR(hdr))
4470                         arc_hdr_l2hdr_destroy(hdr);
4471 
4472                 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
4473         }
4474 
4475         /*
4476          * Do we have more than one buf?
4477          */
4478         if (hdr->b_l1hdr.b_datacnt > 1) {
4479                 arc_buf_hdr_t *nhdr;
4480                 arc_buf_t **bufp;
4481                 uint64_t blksz = hdr->b_size;
4482                 uint64_t spa = hdr->b_spa;
4483                 arc_buf_contents_t type = arc_buf_type(hdr);
4484                 uint32_t flags = hdr->b_flags;
4485 
4486                 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
4487                 /*
4488                  * Pull the data off of this hdr and attach it to
4489                  * a new anonymous hdr.
4490                  */
4491                 (void) remove_reference(hdr, hash_lock, tag);
4492                 bufp = &hdr->b_l1hdr.b_buf;
4493                 while (*bufp != buf)
4494                         bufp = &(*bufp)->b_next;
4495                 *bufp = buf->b_next;
4496                 buf->b_next = NULL;
4497 
4498                 ASSERT3P(state, !=, arc_l2c_only);
4499 
4500                 (void) refcount_remove_many(
4501                     &state->arcs_size, hdr->b_size, buf);
4502 
4503                 if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
4504                         ASSERT3P(state, !=, arc_l2c_only);
4505                         uint64_t *size = &state->arcs_lsize[type];
4506                         ASSERT3U(*size, >=, hdr->b_size);
4507                         atomic_add_64(size, -hdr->b_size);
4508                 }
4509 
4510                 /*
4511                  * We're releasing a duplicate user data buffer, so update
4512                  * our statistics accordingly.
4513                  */
4514                 if (HDR_ISTYPE_DATA(hdr)) {
4515                         ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
4516                         ARCSTAT_INCR(arcstat_duplicate_buffers_size,
4517                             -hdr->b_size);
4518                 }
4519                 hdr->b_l1hdr.b_datacnt -= 1;
4520                 arc_cksum_verify(buf);
4521                 arc_buf_unwatch(buf);
4522 
4523                 mutex_exit(hash_lock);
4524 
4525                 nhdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
4526                 nhdr->b_size = blksz;
4527                 nhdr->b_spa = spa;
4528 
4529                 nhdr->b_flags = flags & ARC_FLAG_L2_WRITING;
4530                 nhdr->b_flags |= arc_bufc_to_flags(type);
4531                 nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;
4532 
4533                 nhdr->b_l1hdr.b_buf = buf;
4534                 nhdr->b_l1hdr.b_datacnt = 1;
4535                 nhdr->b_l1hdr.b_state = arc_anon;
4536                 nhdr->b_l1hdr.b_arc_access = 0;
4537                 nhdr->b_l1hdr.b_tmp_cdata = NULL;
4538                 nhdr->b_freeze_cksum = NULL;
4539 
4540                 (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
4541                 buf->b_hdr = nhdr;
4542                 mutex_exit(&buf->b_evict_lock);
4543                 (void) refcount_add_many(&arc_anon->arcs_size, blksz, buf);
4544         } else {
4545                 mutex_exit(&buf->b_evict_lock);
4546                 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
4547                 /* protected by hash lock, or hdr is on arc_anon */
4548                 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
4549                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4550                 arc_change_state(arc_anon, hdr, hash_lock);
4551                 hdr->b_l1hdr.b_arc_access = 0;
4552                 mutex_exit(hash_lock);
4553 
4554                 buf_discard_identity(hdr);
4555                 arc_buf_thaw(buf);
4556         }
4557         buf->b_efunc = NULL;
4558         buf->b_private = NULL;
4559 }
4560 
4561 int
4562 arc_released(arc_buf_t *buf)
4563 {
4564         int released;
4565 
4566         mutex_enter(&buf->b_evict_lock);
4567         released = (buf->b_data != NULL &&
4568             buf->b_hdr->b_l1hdr.b_state == arc_anon);
4569         mutex_exit(&buf->b_evict_lock);
4570         return (released);
4571 }
4572 
4573 #ifdef ZFS_DEBUG
4574 int
4575 arc_referenced(arc_buf_t *buf)
4576 {
4577         int referenced;
4578 
4579         mutex_enter(&buf->b_evict_lock);
4580         referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
4581         mutex_exit(&buf->b_evict_lock);
4582         return (referenced);
4583 }
4584 #endif
4585 
4586 static void
4587 arc_write_ready(zio_t *zio)
4588 {
4589         arc_write_callback_t *callback = zio->io_private;
4590         arc_buf_t *buf = callback->awcb_buf;
4591         arc_buf_hdr_t *hdr = buf->b_hdr;
4592 
4593         ASSERT(HDR_HAS_L1HDR(hdr));
4594         ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
4595         ASSERT(hdr->b_l1hdr.b_datacnt > 0);
4596         callback->awcb_ready(zio, buf, callback->awcb_private);
4597 
4598         /*
4599          * If the IO is already in progress, then this is a re-write
4600          * attempt, so we need to thaw and re-compute the cksum.
4601          * It is the responsibility of the callback to handle the
4602          * accounting for any re-write attempt.
4603          */
4604         if (HDR_IO_IN_PROGRESS(hdr)) {
4605                 mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
4606                 if (hdr->b_freeze_cksum != NULL) {
4607                         kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
4608                         hdr->b_freeze_cksum = NULL;
4609                 }
4610                 mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
4611         }
4612         arc_cksum_compute(buf, B_FALSE);
4613         hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
4614 }
4615 
4616 /*
4617  * The SPA calls this callback for each physical write that happens on behalf
4618  * of a logical write.  See the comment in dbuf_write_physdone() for details.
4619  */
4620 static void
4621 arc_write_physdone(zio_t *zio)
4622 {
4623         arc_write_callback_t *cb = zio->io_private;
4624         if (cb->awcb_physdone != NULL)
4625                 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
4626 }
4627 
4628 static void
4629 arc_write_done(zio_t *zio)
4630 {
4631         arc_write_callback_t *callback = zio->io_private;
4632         arc_buf_t *buf = callback->awcb_buf;
4633         arc_buf_hdr_t *hdr = buf->b_hdr;
4634 
4635         ASSERT(hdr->b_l1hdr.b_acb == NULL);
4636 
4637         if (zio->io_error == 0) {
4638                 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
4639                         buf_discard_identity(hdr);
4640                 } else {
4641                         hdr->b_dva = *BP_IDENTITY(zio->io_bp);
4642                         hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
4643                 }
4644         } else {
4645                 ASSERT(BUF_EMPTY(hdr));
4646         }
4647 
4648         /*
4649          * If the block to be written was all-zero or compressed enough to be
4650          * embedded in the BP, no write was performed so there will be no
4651          * dva/birth/checksum.  The buffer must therefore remain anonymous
4652          * (and uncached).
4653          */
4654         if (!BUF_EMPTY(hdr)) {
4655                 arc_buf_hdr_t *exists;
4656                 kmutex_t *hash_lock;
4657 
4658                 ASSERT(zio->io_error == 0);
4659 
4660                 arc_cksum_verify(buf);
4661 
4662                 exists = buf_hash_insert(hdr, &hash_lock);
4663                 if (exists != NULL) {
4664                         /*
4665                          * This can only happen if we overwrite for
4666                          * sync-to-convergence, because we remove
4667                          * buffers from the hash table when we arc_free().
4668                          */
4669                         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
4670                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
4671                                         panic("bad overwrite, hdr=%p exists=%p",
4672                                             (void *)hdr, (void *)exists);
4673                                 ASSERT(refcount_is_zero(
4674                                     &exists->b_l1hdr.b_refcnt));
4675                                 arc_change_state(arc_anon, exists, hash_lock);
4676                                 mutex_exit(hash_lock);
4677                                 arc_hdr_destroy(exists);
4678                                 exists = buf_hash_insert(hdr, &hash_lock);
4679                                 ASSERT3P(exists, ==, NULL);
4680                         } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
4681                                 /* nopwrite */
4682                                 ASSERT(zio->io_prop.zp_nopwrite);
4683                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
4684                                         panic("bad nopwrite, hdr=%p exists=%p",
4685                                             (void *)hdr, (void *)exists);
4686                         } else {
4687                                 /* Dedup */
4688                                 ASSERT(hdr->b_l1hdr.b_datacnt == 1);
4689                                 ASSERT(hdr->b_l1hdr.b_state == arc_anon);
4690                                 ASSERT(BP_GET_DEDUP(zio->io_bp));
4691                                 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
4692                         }
4693                 }
4694                 hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
4695                 /* if it's not anon, we are doing a scrub */
4696                 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
4697                         arc_access(hdr, hash_lock);
4698                 mutex_exit(hash_lock);
4699         } else {
4700                 hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
4701         }
4702 
4703         ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4704         callback->awcb_done(zio, buf, callback->awcb_private);
4705 
4706         kmem_free(callback, sizeof (arc_write_callback_t));
4707 }
4708 
4709 zio_t *
4710 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
4711     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
4712     const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
4713     arc_done_func_t *done, void *private, zio_priority_t priority,
4714     int zio_flags, const zbookmark_phys_t *zb)
4715 {
4716         arc_buf_hdr_t *hdr = buf->b_hdr;
4717         arc_write_callback_t *callback;
4718         zio_t *zio;
4719 
4720         ASSERT(ready != NULL);
4721         ASSERT(done != NULL);
4722         ASSERT(!HDR_IO_ERROR(hdr));
4723         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4724         ASSERT(hdr->b_l1hdr.b_acb == NULL);
4725         ASSERT(hdr->b_l1hdr.b_datacnt > 0);
4726         if (l2arc)
4727                 hdr->b_flags |= ARC_FLAG_L2CACHE;
4728         if (l2arc_compress)
4729                 hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4730         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
4731         callback->awcb_ready = ready;
4732         callback->awcb_physdone = physdone;
4733         callback->awcb_done = done;
4734         callback->awcb_private = private;
4735         callback->awcb_buf = buf;
4736 
4737         zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
4738             arc_write_ready, arc_write_physdone, arc_write_done, callback,
4739             priority, zio_flags, zb);
4740 
4741         return (zio);
4742 }
4743 
4744 static int
4745 arc_memory_throttle(uint64_t reserve, uint64_t txg)
4746 {
4747 #ifdef _KERNEL
4748         uint64_t available_memory = ptob(freemem);
4749         static uint64_t page_load = 0;
4750         static uint64_t last_txg = 0;
4751 
4752 #if defined(__i386)
4753         available_memory =
4754             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
4755 #endif
4756 
4757         if (freemem > physmem * arc_lotsfree_percent / 100)
4758                 return (0);
4759 
4760         if (txg > last_txg) {
4761                 last_txg = txg;
4762                 page_load = 0;
4763         }
4764         /*
4765          * If we are in pageout, we know that memory is already tight,
4766          * the arc is already going to be evicting, so we just want to
4767          * continue to let page writes occur as quickly as possible.
4768          */
4769         if (curproc == proc_pageout) {
4770                 if (page_load > MAX(ptob(minfree), available_memory) / 4)
4771                         return (SET_ERROR(ERESTART));
4772                 /* Note: reserve is inflated, so we deflate */
4773                 page_load += reserve / 8;
4774                 return (0);
4775         } else if (page_load > 0 && arc_reclaim_needed()) {
4776                 /* memory is low, delay before restarting */
4777                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
4778                 return (SET_ERROR(EAGAIN));
4779         }
4780         page_load = 0;
4781 #endif
4782         return (0);
4783 }
4784 
4785 void
4786 arc_tempreserve_clear(uint64_t reserve)
4787 {
4788         atomic_add_64(&arc_tempreserve, -reserve);
4789         ASSERT((int64_t)arc_tempreserve >= 0);
4790 }
4791 
4792 int
4793 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
4794 {
4795         int error;
4796         uint64_t anon_size;
4797 
4798         if (reserve > arc_c/4 && !arc_no_grow)
4799                 arc_c = MIN(arc_c_max, reserve * 4);
4800         if (reserve > arc_c)
4801                 return (SET_ERROR(ENOMEM));
4802 
4803         /*
4804          * Don't count loaned bufs as in flight dirty data to prevent long
4805          * network delays from blocking transactions that are ready to be
4806          * assigned to a txg.
4807          */
4808         anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
4809             arc_loaned_bytes), 0);
4810 
4811         /*
4812          * Writes will, almost always, require additional memory allocations
4813          * in order to compress/encrypt/etc the data.  We therefore need to
4814          * make sure that there is sufficient available memory for this.
4815          */
4816         error = arc_memory_throttle(reserve, txg);
4817         if (error != 0)
4818                 return (error);
4819 
4820         /*
4821          * Throttle writes when the amount of dirty data in the cache
4822          * gets too large.  We try to keep the cache less than half full
4823          * of dirty blocks so that our sync times don't grow too large.
4824          * Note: if two requests come in concurrently, we might let them
4825          * both succeed, when one of them should fail.  Not a huge deal.
4826          */
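             /*
              * For example (illustrative numbers only): with arc_c at 1 GB,
              * the reservation is refused below once anonymous (dirty) data
              * exceeds 256 MB (arc_c / 4) and the sum of the new reserve,
              * outstanding tempreserves, and anonymous data would exceed
              * 512 MB (arc_c / 2).
              */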
4827 
4828         if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
4829             anon_size > arc_c / 4) {
4830                 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
4831                     "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
4832                     arc_tempreserve>>10,
4833                     arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
4834                     arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
4835                     reserve>>10, arc_c>>10);
4836                 return (SET_ERROR(ERESTART));
4837         }
4838         atomic_add_64(&arc_tempreserve, reserve);
4839         return (0);
4840 }
4841 
4842 static void
4843 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
4844     kstat_named_t *evict_data, kstat_named_t *evict_metadata)
4845 {
4846         size->value.ui64 = refcount_count(&state->arcs_size);
4847         evict_data->value.ui64 = state->arcs_lsize[ARC_BUFC_DATA];
4848         evict_metadata->value.ui64 = state->arcs_lsize[ARC_BUFC_METADATA];
4849 }
4850 
4851 static int
4852 arc_kstat_update(kstat_t *ksp, int rw)
4853 {
4854         arc_stats_t *as = ksp->ks_data;
4855 
4856         if (rw == KSTAT_WRITE) {
4857                 return (EACCES);
4858         } else {
4859                 arc_kstat_update_state(arc_anon,
4860                     &as->arcstat_anon_size,
4861                     &as->arcstat_anon_evictable_data,
4862                     &as->arcstat_anon_evictable_metadata);
4863                 arc_kstat_update_state(arc_mru,
4864                     &as->arcstat_mru_size,
4865                     &as->arcstat_mru_evictable_data,
4866                     &as->arcstat_mru_evictable_metadata);
4867                 arc_kstat_update_state(arc_mru_ghost,
4868                     &as->arcstat_mru_ghost_size,
4869                     &as->arcstat_mru_ghost_evictable_data,
4870                     &as->arcstat_mru_ghost_evictable_metadata);
4871                 arc_kstat_update_state(arc_mfu,
4872                     &as->arcstat_mfu_size,
4873                     &as->arcstat_mfu_evictable_data,
4874                     &as->arcstat_mfu_evictable_metadata);
4875                 arc_kstat_update_state(arc_mfu_ghost,
4876                     &as->arcstat_mfu_ghost_size,
4877                     &as->arcstat_mfu_ghost_evictable_data,
4878                     &as->arcstat_mfu_ghost_evictable_metadata);
4879         }
4880 
4881         return (0);
4882 }
4883 
4884 /*
4885  * This function *must* return indices evenly distributed between all
4886  * sublists of the multilist. This is needed due to how the ARC eviction
4887  * code is laid out; arc_evict_state() assumes ARC buffers are evenly
4888  * distributed between all sublists and uses this assumption when
4889  * deciding which sublist to evict from and how much to evict from it.
4890  */
4891 unsigned int
4892 arc_state_multilist_index_func(multilist_t *ml, void *obj)
4893 {
4894         arc_buf_hdr_t *hdr = obj;
4895 
4896         /*
4897          * We rely on b_dva to generate evenly distributed index
4898          * numbers using buf_hash below. So, as an added precaution,
4899          * let's make sure we never add empty buffers to the arc lists.
4900          */
4901         ASSERT(!BUF_EMPTY(hdr));
4902 
4903         /*
4904          * The assumption here is that the hash value for a given
4905          * arc_buf_hdr_t will remain constant throughout its lifetime
4906          * (i.e. its b_spa, b_dva, and b_birth fields don't change).
4907          * Thus, we don't need to store the header's sublist index
4908          * on insertion, as this index can be recalculated on removal.
4909          *
4910          * Also, the low order bits of the hash value are thought to be
4911          * distributed evenly. Otherwise, in the case that the multilist
4912          * has a power of two number of sublists, each sublist's usage
4913          * would not be evenly distributed.
4914          */
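             /*
              * For example (illustrative only): with 8 sublists, a header
              * whose hash value is 43 (0x2b) is assigned to sublist
              * 43 % 8 == 3.
              */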
4915         return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
4916             multilist_get_num_sublists(ml));
4917 }
4918 
4919 void
4920 arc_init(void)
4921 {
4922         /*
4923          * allmem is "all memory that we could possibly use".
4924          */
4925 #ifdef _KERNEL
4926         uint64_t allmem = ptob(physmem - swapfs_minfree);
4927 #else
4928         uint64_t allmem = (physmem * PAGESIZE) / 2;
4929 #endif
4930 
4931         mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
4932         cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
4933         cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);
4934 
4935         mutex_init(&arc_user_evicts_lock, NULL, MUTEX_DEFAULT, NULL);
4936         cv_init(&arc_user_evicts_cv, NULL, CV_DEFAULT, NULL);
4937 
4938         /* Convert seconds to clock ticks */
4939         arc_min_prefetch_lifespan = 1 * hz;
4940 
4941         /* Start out with 1/8 of all memory */
4942         arc_c = allmem / 8;
4943 
4944 #ifdef _KERNEL
4945         /*
4946          * On architectures where the physical memory can be larger
4947          * than the addressable space (intel in 32-bit mode), we may
4948          * need to limit the cache to 1/8 of VM size.
4949          */
4950         arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
4951 #endif
4952 
4953         /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
4954         arc_c_min = MAX(allmem / 32, 64 << 20);
4955         /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
4956         if (allmem >= 1 << 30)
4957                 arc_c_max = allmem - (1 << 30);
4958         else
4959                 arc_c_max = arc_c_min;
4960         arc_c_max = MAX(allmem * 3 / 4, arc_c_max);
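             /*
              * For example (illustrative numbers only): with 16 GB of usable
              * memory, the defaults above give arc_c_min = 512 MB (1/32) and
              * arc_c_max = 15 GB (all but 1 GB, which exceeds the 3/4 floor
              * of 12 GB).
              */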
4961 
4962         /*
4963          * Allow the tunables to override our calculations if they are
4964          * reasonable (i.e. over 64MB)
4965          */
4966         if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem)
4967                 arc_c_max = zfs_arc_max;
4968         if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max)
4969                 arc_c_min = zfs_arc_min;
4970 
4971         arc_c = arc_c_max;
4972         arc_p = (arc_c >> 1);
4973 
4974         /* limit meta-data to 1/4 of the arc capacity */
4975         arc_meta_limit = arc_c_max / 4;
4976 
4977         /* Allow the tunable to override if it is reasonable */
4978         if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4979                 arc_meta_limit = zfs_arc_meta_limit;
4980 
4981         if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4982                 arc_c_min = arc_meta_limit / 2;
4983 
4984         if (zfs_arc_meta_min > 0) {
4985                 arc_meta_min = zfs_arc_meta_min;
4986         } else {
4987                 arc_meta_min = arc_c_min / 2;
4988         }
4989 
4990         if (zfs_arc_grow_retry > 0)
4991                 arc_grow_retry = zfs_arc_grow_retry;
4992 
4993         if (zfs_arc_shrink_shift > 0)
4994                 arc_shrink_shift = zfs_arc_shrink_shift;
4995 
4996         /*
4997          * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
4998          */
4999         if (arc_no_grow_shift >= arc_shrink_shift)
5000                 arc_no_grow_shift = arc_shrink_shift - 1;
5001 
5002         if (zfs_arc_p_min_shift > 0)
5003                 arc_p_min_shift = zfs_arc_p_min_shift;
5004 
5005         if (zfs_arc_num_sublists_per_state < 1)
5006                 zfs_arc_num_sublists_per_state = MAX(boot_ncpus, 1);
5007 
5008         /* if kmem_flags are set, let's try to use less memory */
5009         if (kmem_debugging())
5010                 arc_c = arc_c / 2;
5011         if (arc_c < arc_c_min)
5012                 arc_c = arc_c_min;
5013 
5014         arc_anon = &ARC_anon;
5015         arc_mru = &ARC_mru;
5016         arc_mru_ghost = &ARC_mru_ghost;
5017         arc_mfu = &ARC_mfu;
5018         arc_mfu_ghost = &ARC_mfu_ghost;
5019         arc_l2c_only = &ARC_l2c_only;
5020         arc_size = 0;
5021 
5022         multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
5023             sizeof (arc_buf_hdr_t),
5024             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5025             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5026         multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
5027             sizeof (arc_buf_hdr_t),
5028             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5029             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5030         multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
5031             sizeof (arc_buf_hdr_t),
5032             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5033             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5034         multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
5035             sizeof (arc_buf_hdr_t),
5036             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5037             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5038         multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
5039             sizeof (arc_buf_hdr_t),
5040             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5041             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5042         multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
5043             sizeof (arc_buf_hdr_t),
5044             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5045             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5046         multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
5047             sizeof (arc_buf_hdr_t),
5048             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5049             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5050         multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
5051             sizeof (arc_buf_hdr_t),
5052             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5053             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5054         multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
5055             sizeof (arc_buf_hdr_t),
5056             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5057             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5058         multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
5059             sizeof (arc_buf_hdr_t),
5060             offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
5061             zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
5062 
5063         refcount_create(&arc_anon->arcs_size);
5064         refcount_create(&arc_mru->arcs_size);
5065         refcount_create(&arc_mru_ghost->arcs_size);
5066         refcount_create(&arc_mfu->arcs_size);
5067         refcount_create(&arc_mfu_ghost->arcs_size);
5068         refcount_create(&arc_l2c_only->arcs_size);
5069 
5070         buf_init();
5071 
5072         arc_reclaim_thread_exit = FALSE;
5073         arc_user_evicts_thread_exit = FALSE;
5074         arc_eviction_list = NULL;
5075         bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
5076 
5077         arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
5078             sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
5079 
5080         if (arc_ksp != NULL) {
5081                 arc_ksp->ks_data = &arc_stats;
5082                 arc_ksp->ks_update = arc_kstat_update;
5083                 kstat_install(arc_ksp);
5084         }
5085 
5086         (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
5087             TS_RUN, minclsyspri);
5088 
5089         (void) thread_create(NULL, 0, arc_user_evicts_thread, NULL, 0, &p0,
5090             TS_RUN, minclsyspri);
5091 
5092         arc_dead = FALSE;
5093         arc_warm = B_FALSE;
5094 
5095         /*
5096          * Calculate maximum amount of dirty data per pool.
5097          *
5098          * If it has been set by /etc/system, take that.
5099          * Otherwise, use a percentage of physical memory defined by
5100          * zfs_dirty_data_max_percent (default 10%) with a cap at
5101          * zfs_dirty_data_max_max (default 4GB).
5102          */
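             /*
              * For example (illustrative numbers only): on a 64 GB system the
              * 10% default would suggest 6.4 GB, but the 4 GB cap applies, so
              * zfs_dirty_data_max defaults to 4 GB.
              */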
5103         if (zfs_dirty_data_max == 0) {
5104                 zfs_dirty_data_max = physmem * PAGESIZE *
5105                     zfs_dirty_data_max_percent / 100;
5106                 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
5107                     zfs_dirty_data_max_max);
5108         }
5109 }
5110 
5111 void
5112 arc_fini(void)
5113 {
5114         mutex_enter(&arc_reclaim_lock);
5115         arc_reclaim_thread_exit = TRUE;
5116         /*
5117          * The reclaim thread will set arc_reclaim_thread_exit back to
5118          * FALSE when it is finished exiting; we're waiting for that.
5119          */
5120         while (arc_reclaim_thread_exit) {
5121                 cv_signal(&arc_reclaim_thread_cv);
5122                 cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
5123         }
5124         mutex_exit(&arc_reclaim_lock);
5125 
5126         mutex_enter(&arc_user_evicts_lock);
5127         arc_user_evicts_thread_exit = TRUE;
5128         /*
5129          * The user evicts thread will set arc_user_evicts_thread_exit
5130          * to FALSE when it is finished exiting; we're waiting for that.
5131          */
5132         while (arc_user_evicts_thread_exit) {
5133                 cv_signal(&arc_user_evicts_cv);
5134                 cv_wait(&arc_user_evicts_cv, &arc_user_evicts_lock);
5135         }
5136         mutex_exit(&arc_user_evicts_lock);
5137 
5138         /* Use TRUE to ensure *all* buffers are evicted */
5139         arc_flush(NULL, TRUE);
5140 
5141         arc_dead = TRUE;
5142 
5143         if (arc_ksp != NULL) {
5144                 kstat_delete(arc_ksp);
5145                 arc_ksp = NULL;
5146         }
5147 
5148         mutex_destroy(&arc_reclaim_lock);
5149         cv_destroy(&arc_reclaim_thread_cv);
5150         cv_destroy(&arc_reclaim_waiters_cv);
5151 
5152         mutex_destroy(&arc_user_evicts_lock);
5153         cv_destroy(&arc_user_evicts_cv);
5154 
5155         refcount_destroy(&arc_anon->arcs_size);
5156         refcount_destroy(&arc_mru->arcs_size);
5157         refcount_destroy(&arc_mru_ghost->arcs_size);
5158         refcount_destroy(&arc_mfu->arcs_size);
5159         refcount_destroy(&arc_mfu_ghost->arcs_size);
5160         refcount_destroy(&arc_l2c_only->arcs_size);
5161 
5162         multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
5163         multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
5164         multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
5165         multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
5166         multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
5167         multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
5168         multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
5169         multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
5170 
5171         buf_fini();
5172 
5173         ASSERT0(arc_loaned_bytes);
5174 }
5175 
5176 /*
5177  * Level 2 ARC
5178  *
5179  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
5180  * It uses dedicated storage devices to hold cached data, which are populated
5181  * using large infrequent writes.  The main role of this cache is to boost
5182  * the performance of random read workloads.  The intended L2ARC devices
5183  * include short-stroked disks, solid state disks, and other media with
5184  * substantially faster read latency than disk.
5185  *
5186  *                 +-----------------------+
5187  *                 |         ARC           |
5188  *                 +-----------------------+
5189  *                    |         ^     ^
5190  *                    |         |     |
5191  *      l2arc_feed_thread()    arc_read()
5192  *                    |         |     |
5193  *                    |  l2arc read   |
5194  *                    V         |     |
5195  *               +---------------+    |
5196  *               |     L2ARC     |    |
5197  *               +---------------+    |
5198  *                   |    ^           |
5199  *          l2arc_write() |           |
5200  *                   |    |           |
5201  *                   V    |           |
5202  *                 +-------+      +-------+
5203  *                 | vdev  |      | vdev  |
5204  *                 | cache |      | cache |
5205  *                 +-------+      +-------+
5206  *                 +=========+     .-----.
5207  *                 :  L2ARC  :    |-_____-|
5208  *                 : devices :    | Disks |
5209  *                 +=========+    `-_____-'
5210  *
5211  * Read requests are satisfied from the following sources, in order:
5212  *
5213  *      1) ARC
5214  *      2) vdev cache of L2ARC devices
5215  *      3) L2ARC devices
5216  *      4) vdev cache of disks
5217  *      5) disks
5218  *
5219  * Some L2ARC device types exhibit extremely slow write performance.
5220  * To accommodate this, there are some significant differences between
5221  * the L2ARC and traditional cache design:
5222  *
5223  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
5224  * the ARC behave as usual, freeing buffers and placing headers on ghost
5225  * lists.  The ARC does not send buffers to the L2ARC during eviction as
5226  * this would add inflated write latencies for all ARC memory pressure.
5227  *
5228  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
5229  * It does this by periodically scanning buffers from the eviction-end of
5230  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
5231  * not already there. It scans until a headroom of buffers is satisfied,
5232  * which itself is a buffer for ARC eviction. If a compressible buffer is
5233  * found during scanning and selected for writing to an L2ARC device, we
5234  * temporarily boost scanning headroom during the next scan cycle to make
5235  * sure we adapt to compression effects (which might significantly reduce
5236  * the data volume we write to L2ARC). The thread that does this is
5237  * l2arc_feed_thread(), illustrated below; example sizes are included to
5238  * provide a better sense of ratio than this diagram:
5239  *
5240  *             head -->                        tail
5241  *              +---------------------+----------+
5242  *      ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
5243  *              +---------------------+----------+   |   o L2ARC eligible
5244  *      ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
5245  *              +---------------------+----------+   |
5246  *                   15.9 Gbytes      ^ 32 Mbytes    |
5247  *                                 headroom          |
5248  *                                            l2arc_feed_thread()
5249  *                                                   |
5250  *                       l2arc write hand <--[oooo]--'
5251  *                               |           8 Mbyte
5252  *                               |          write max
5253  *                               V
5254  *                +==============================+
5255  *      L2ARC dev |####|#|###|###|    |####| ... |
5256  *                +==============================+
5257  *                           32 Gbytes
5258  *
5259  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
5260  * evicted, then the L2ARC has cached a buffer much sooner than it probably
5261  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
5262  * safe to say that this is an uncommon case, since buffers at the end of
5263  * the ARC lists have moved there due to inactivity.
5264  *
5265  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
5266  * then the L2ARC simply misses copying some buffers.  This serves as a
5267  * pressure valve to prevent heavy read workloads from both stalling the ARC
5268  * with waits and clogging the L2ARC with writes.  This also helps prevent
5269  * the potential for the L2ARC to churn if it attempts to cache content too
5270  * quickly, such as during backups of the entire pool.
5271  *
5272  * 5. After system boot and before the ARC has filled main memory, there are
5273  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
5274  * lists can remain mostly static.  Instead of searching these lists from
5275  * the tail as pictured, the l2arc_feed_thread() will search from the list
5276  * heads for eligible buffers, greatly increasing its chance of finding them.
5277  *
5278  * The L2ARC device write speed is also boosted during this time so that
5279  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
5280  * there are no L2ARC reads, and no fear of degrading read performance
5281  * through increased writes.
5282  *
5283  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
5284  * the vdev queue can aggregate them into larger and fewer writes.  Each
5285  * device is written to in a rotor fashion, sweeping writes through
5286  * available space then repeating.
5287  *
5288  * 7. The L2ARC does not store dirty content.  It never needs to flush
5289  * write buffers back to disk-based storage.
5290  *
5291  * 8. If an ARC buffer is written (and dirtied) which also exists in the
5292  * L2ARC, the now stale L2ARC buffer is immediately dropped.
5293  *
5294  * The performance of the L2ARC can be tweaked by a number of tunables, which
5295  * may need adjusting for different workloads:
5296  *
5297  *      l2arc_write_max         max write bytes per interval
5298  *      l2arc_write_boost       extra write bytes during device warmup
5299  *      l2arc_noprefetch        skip caching prefetched buffers
5300  *      l2arc_headroom          number of max device writes to precache
5301  *      l2arc_headroom_boost    when we find compressed buffers during ARC
5302  *                              scanning, we multiply headroom by this
5303  *                              percentage factor for the next scan cycle,
5304  *                              since more compressed buffers are likely to
5305  *                              be present
5306  *      l2arc_feed_secs         seconds between L2ARC writing
5307  *
5308  * Tunables may be removed or added as future performance improvements are
5309  * integrated, and also may become zpool properties.
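      *
      * As a rough sketch of how these knobs interact (l2arc_write_size() and
      * l2arc_write_buffers() below hold the authoritative arithmetic), each
      * feed cycle writes at most
      *
      *      write size = l2arc_write_max (+ l2arc_write_boost until warm)
      *
      * and scans a headroom of roughly
      *
      *      headroom = write size * l2arc_headroom
      *      headroom = headroom * l2arc_headroom_boost / 100    (when boosted)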
5310  *
5311  * There are three key functions that control how the L2ARC warms up:
5312  *
5313  *      l2arc_write_eligible()  check if a buffer is eligible to cache
5314  *      l2arc_write_size()      calculate how much to write
5315  *      l2arc_write_interval()  calculate sleep delay between writes
5316  *
5317  * These three functions determine what to write, how much, and how quickly
5318  * to send writes.
5319  */
5320 
5321 static boolean_t
5322 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
5323 {
5324         /*
5325          * A buffer is *not* eligible for the L2ARC if it:
5326          * 1. belongs to a different spa.
5327          * 2. is already cached on the L2ARC.
5328          * 3. has an I/O in progress (it may be an incomplete read).
5329          * 4. is flagged not eligible (zfs property).
5330          */
5331         if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
5332             HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
5333                 return (B_FALSE);
5334 
5335         return (B_TRUE);
5336 }
5337 
5338 static uint64_t
5339 l2arc_write_size(void)
5340 {
5341         uint64_t size;
5342 
5343         /*
5344          * Make sure our globals have meaningful values in case the user
5345          * altered them.
5346          */
5347         size = l2arc_write_max;
5348         if (size == 0) {
5349                 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
5350                     "be greater than zero, resetting it to the default (%d)",
5351                     L2ARC_WRITE_SIZE);
5352                 size = l2arc_write_max = L2ARC_WRITE_SIZE;
5353         }
5354 
5355         if (arc_warm == B_FALSE)
5356                 size += l2arc_write_boost;
5357 
5358         return (size);
5360 }
5361 
5362 static clock_t
5363 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
5364 {
5365         clock_t interval, next, now;
5366 
5367         /*
5368          * If the ARC lists are busy, increase our write rate; if the
5369          * lists are stale, idle back.  This is achieved by checking
5370          * how much we previously wrote - if it was more than half of
5371          * what we wanted, schedule the next write much sooner.
5372          */
5373         if (l2arc_feed_again && wrote > (wanted / 2))
5374                 interval = (hz * l2arc_feed_min_ms) / 1000;
5375         else
5376                 interval = hz * l2arc_feed_secs;
5377 
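             /*
              * Wake up one interval after this write cycle began, but never
              * schedule a wakeup in the past: if the write itself took longer
              * than the interval, run again as soon as possible.
              */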
5378         now = ddi_get_lbolt();
5379         next = MAX(now, MIN(now + interval, began + interval));
5380 
5381         return (next);
5382 }
5383 
5384 /*
5385  * Cycle through L2ARC devices.  This is how L2ARC load balances.
5386  * If a device is returned, this also returns holding the spa config lock.
5387  */
5388 static l2arc_dev_t *
5389 l2arc_dev_get_next(void)
5390 {
5391         l2arc_dev_t *first, *next = NULL;
5392 
5393         /*
5394          * Lock out the removal of spas (spa_namespace_lock), then removal
5395          * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
5396          * both locks will be dropped and a spa config lock held instead.
5397          */
5398         mutex_enter(&spa_namespace_lock);
5399         mutex_enter(&l2arc_dev_mtx);
5400 
5401         /* if there are no vdevs, there is nothing to do */
5402         if (l2arc_ndev == 0)
5403                 goto out;
5404 
5405         first = NULL;
5406         next = l2arc_dev_last;
5407         do {
5408                 /* loop around the list looking for a non-faulted vdev */
5409                 if (next == NULL) {
5410                         next = list_head(l2arc_dev_list);
5411                 } else {
5412                         next = list_next(l2arc_dev_list, next);
5413                         if (next == NULL)
5414                                 next = list_head(l2arc_dev_list);
5415                 }
5416 
5417                 /* if we have come back to the start, bail out */
5418                 if (first == NULL)
5419                         first = next;
5420                 else if (next == first)
5421                         break;
5422 
5423         } while (vdev_is_dead(next->l2ad_vdev));
5424 
5425         /* if we were unable to find any usable vdevs, return NULL */
5426         if (vdev_is_dead(next->l2ad_vdev))
5427                 next = NULL;
5428 
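             /* Remember where the rotor stopped so the next pass resumes here. */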
5429         l2arc_dev_last = next;
5430 
5431 out:
5432         mutex_exit(&l2arc_dev_mtx);
5433 
5434         /*
5435          * Grab the config lock to prevent the 'next' device from being
5436          * removed while we are writing to it.
5437          */
5438         if (next != NULL)
5439                 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
5440         mutex_exit(&spa_namespace_lock);
5441 
5442         return (next);
5443 }
5444 
5445 /*
5446  * Free buffers that were tagged for destruction.
5447  */
5448 static void
5449 l2arc_do_free_on_write(void)
5450 {
5451         list_t *buflist;
5452         l2arc_data_free_t *df, *df_prev;
5453 
5454         mutex_enter(&l2arc_free_on_write_mtx);
5455         buflist = l2arc_free_on_write;
5456 
5457         for (df = list_tail(buflist); df; df = df_prev) {
5458                 df_prev = list_prev(buflist, df);
5459                 ASSERT(df->l2df_data != NULL);
5460                 ASSERT(df->l2df_func != NULL);
5461                 df->l2df_func(df->l2df_data, df->l2df_size);
5462                 list_remove(buflist, df);
5463                 kmem_free(df, sizeof (l2arc_data_free_t));
5464         }
5465 
5466         mutex_exit(&l2arc_free_on_write_mtx);
5467 }
5468 
5469 /*
5470  * A write to a cache device has completed.  Update all headers to allow
5471  * reads from these buffers to begin.
5472  */
5473 static void
5474 l2arc_write_done(zio_t *zio)
5475 {
5476         l2arc_write_callback_t *cb;
5477         l2arc_dev_t *dev;
5478         list_t *buflist;
5479         arc_buf_hdr_t *head, *hdr, *hdr_prev;
5480         kmutex_t *hash_lock;
5481         int64_t bytes_dropped = 0;
5482 
5483         cb = zio->io_private;
5484         ASSERT(cb != NULL);
5485         dev = cb->l2wcb_dev;
5486         ASSERT(dev != NULL);
5487         head = cb->l2wcb_head;
5488         ASSERT(head != NULL);
5489         buflist = &dev->l2ad_buflist;
5490         ASSERT(buflist != NULL);
5491         DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
5492             l2arc_write_callback_t *, cb);
5493 
5494         if (zio->io_error != 0)
5495                 ARCSTAT_BUMP(arcstat_l2_writes_error);
5496 
5497         /*
5498          * All writes completed, or an error was hit.
5499          */
5500 top:
5501         mutex_enter(&dev->l2ad_mtx);
5502         for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
5503                 hdr_prev = list_prev(buflist, hdr);
5504 
5505                 hash_lock = HDR_LOCK(hdr);
5506 
5507                 /*
5508                  * We cannot use mutex_enter or else we can deadlock
5509                  * with l2arc_write_buffers (due to swapping the order
5510                  * the hash lock and l2ad_mtx are taken).
5511                  */
5512                 if (!mutex_tryenter(hash_lock)) {
5513                         /*
5514                          * Missed the hash lock. We must retry so we
5515                          * don't leave the ARC_FLAG_L2_WRITING bit set.
5516                          */
5517                         ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
5518 
5519                         /*
5520                          * We don't want to rescan the headers we've
5521                          * already marked as having been written out, so
5522                          * we reinsert the head node so we can pick up
5523                          * where we left off.
5524                          */
5525                         list_remove(buflist, head);
5526                         list_insert_after(buflist, hdr, head);
5527 
5528                         mutex_exit(&dev->l2ad_mtx);
5529 
5530                         /*
5531                          * We wait for the hash lock to become available
5532                          * to try and prevent busy waiting, and increase
5533                          * the chance we'll be able to acquire the lock
5534                          * the next time around.
5535                          */
5536                         mutex_enter(hash_lock);
5537                         mutex_exit(hash_lock);
5538                         goto top;
5539                 }
5540 
5541                 /*
5542                  * We could not have been moved into the arc_l2c_only
5543                  * state while in-flight due to our ARC_FLAG_L2_WRITING
5544                  * bit being set. Let's just ensure that's being enforced.
5545                  */
5546                 ASSERT(HDR_HAS_L1HDR(hdr));
5547 
5548                 /*
5549                  * We may have allocated a buffer for L2ARC compression;
5550                  * if so, we must release it to avoid leaking this data.
5551                  */
5552                 l2arc_release_cdata_buf(hdr);
5553 
5554                 if (zio->io_error != 0) {
5555                         /*
5556                          * Error - drop L2ARC entry.
5557                          */
5558                         list_remove(buflist, hdr);
5559                         hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
5560 
5561                         ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
5562                         ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
5563 
5564                         bytes_dropped += hdr->b_l2hdr.b_asize;
5565                         (void) refcount_remove_many(&dev->l2ad_alloc,
5566                             hdr->b_l2hdr.b_asize, hdr);
5567                 }
5568 
5569                 /*
5570                  * Allow ARC to begin reads and ghost list evictions to
5571                  * this L2ARC entry.
5572                  */
5573                 hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
5574 
5575                 mutex_exit(hash_lock);
5576         }
5577 
5578         atomic_inc_64(&l2arc_writes_done);
5579         list_remove(buflist, head);
5580         ASSERT(!HDR_HAS_L1HDR(head));
5581         kmem_cache_free(hdr_l2only_cache, head);
5582         mutex_exit(&dev->l2ad_mtx);
5583 
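             /* Undo the space accounting for buffers dropped on write errors. */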
5584         vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
5585 
5586         l2arc_do_free_on_write();
5587 
5588         kmem_free(cb, sizeof (l2arc_write_callback_t));
5589 }
5590 
5591 /*
5592  * A read to a cache device completed.  Validate buffer contents before
5593  * handing over to the regular ARC routines.
5594  */
5595 static void
5596 l2arc_read_done(zio_t *zio)
5597 {
5598         l2arc_read_callback_t *cb;
5599         arc_buf_hdr_t *hdr;
5600         arc_buf_t *buf;
5601         kmutex_t *hash_lock;
5602         int equal;
5603 
5604         ASSERT(zio->io_vd != NULL);
5605         ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
5606 
5607         spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
5608 
5609         cb = zio->io_private;
5610         ASSERT(cb != NULL);
5611         buf = cb->l2rcb_buf;
5612         ASSERT(buf != NULL);
5613 
5614         hash_lock = HDR_LOCK(buf->b_hdr);
5615         mutex_enter(hash_lock);
5616         hdr = buf->b_hdr;
5617         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
5618 
5619         /*
5620          * If the buffer was compressed, decompress it first.
5621          */
5622         if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
5623                 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
5624         ASSERT(zio->io_data != NULL);
5625         ASSERT3U(zio->io_size, ==, hdr->b_size);
5626         ASSERT3U(BP_GET_LSIZE(&cb->l2rcb_bp), ==, hdr->b_size);
5627 
5628         /*
5629          * Check that this survived the L2ARC journey.
5630          */
5631         equal = arc_cksum_equal(buf);
5632         if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
5633                 mutex_exit(hash_lock);
5634                 zio->io_private = buf;
5635                 zio->io_bp_copy = cb->l2rcb_bp;   /* XXX fix in L2ARC 2.0 */
5636                 zio->io_bp = &zio->io_bp_copy;        /* XXX fix in L2ARC 2.0 */
5637                 arc_read_done(zio);
5638         } else {
5639                 mutex_exit(hash_lock);
5640                 /*
5641                  * Buffer didn't survive caching.  Increment stats and
5642                  * reissue to the original storage device.
5643                  */
5644                 if (zio->io_error != 0) {
5645                         ARCSTAT_BUMP(arcstat_l2_io_error);
5646                 } else {
5647                         zio->io_error = SET_ERROR(EIO);
5648                 }
5649                 if (!equal)
5650                         ARCSTAT_BUMP(arcstat_l2_cksum_bad);
5651 
5652                 /*
5653                  * If there's no waiter, issue an async i/o to the primary
5654                  * storage now.  If there *is* a waiter, the caller must
5655                  * issue the i/o in a context where it's OK to block.
5656                  */
5657                 if (zio->io_waiter == NULL) {
5658                         zio_t *pio = zio_unique_parent(zio);
5659 
5660                         ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
5661 
5662                         zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
5663                             buf->b_data, hdr->b_size, arc_read_done, buf,
5664                             zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
5665                 }
5666         }
5667 
5668         kmem_free(cb, sizeof (l2arc_read_callback_t));
5669 }
5670 
5671 /*
5672  * This is the list priority from which the L2ARC will search for pages to
5673  * cache.  This is used within loops (0..3) to cycle through lists in the
5674  * desired order.  This order can have a significant effect on cache
5675  * performance.
5676  *
5677  * Currently the metadata lists are hit first, MFU then MRU, followed by
5678  * the data lists.  This function returns a locked list, and also returns
5679  * the lock pointer.
5680  */
5681 static multilist_sublist_t *
5682 l2arc_sublist_lock(int list_num)
5683 {
5684         multilist_t *ml = NULL;
5685         unsigned int idx;
5686 
5687         ASSERT(list_num >= 0 && list_num <= 3);
5688 
5689         switch (list_num) {
5690         case 0:
5691                 ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
5692                 break;
5693         case 1:
5694                 ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
5695                 break;
5696         case 2:
5697                 ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
5698                 break;
5699         case 3:
5700                 ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
5701                 break;
5702         }
5703 
5704         /*
5705          * Return a randomly-selected sublist. This is acceptable
5706          * because the caller feeds only a little bit of data for each
5707          * call (8MB). Subsequent calls will result in different
5708          * sublists being selected.
5709          */
5710         idx = multilist_get_random_index(ml);
5711         return (multilist_sublist_lock(ml, idx));
5712 }
5713 
5714 /*
5715  * Evict buffers from the device write hand to the distance specified in
5716  * bytes.  This distance may span populated buffers, or it may span nothing.
5717  * This is clearing a region on the L2ARC device ready for writing.
5718  * If the 'all' boolean is set, every buffer is evicted.
5719  */
5720 static void
5721 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
5722 {
5723         list_t *buflist;
5724         arc_buf_hdr_t *hdr, *hdr_prev;
5725         kmutex_t *hash_lock;
5726         uint64_t taddr;
5727 
5728         buflist = &dev->l2ad_buflist;
5729 
5730         if (!all && dev->l2ad_first) {
5731                 /*
5732                  * This is the first sweep through the device.  There is
5733                  * nothing to evict.
5734                  */
5735                 return;
5736         }
5737 
5738         if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
5739                 /*
5740                  * When nearing the end of the device, evict to the end
5741                  * before the device write hand jumps to the start.
5742                  */
5743                 taddr = dev->l2ad_end;
5744         } else {
5745                 taddr = dev->l2ad_hand + distance;
5746         }
5747         DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
5748             uint64_t, taddr, boolean_t, all);
5749 
5750 top:
5751         mutex_enter(&dev->l2ad_mtx);
5752         for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
5753                 hdr_prev = list_prev(buflist, hdr);
5754 
5755                 hash_lock = HDR_LOCK(hdr);
5756 
5757                 /*
5758                  * We cannot use mutex_enter or else we can deadlock
5759                  * with l2arc_write_buffers (due to swapping the order
5760                  * the hash lock and l2ad_mtx are taken).
5761                  */
5762                 if (!mutex_tryenter(hash_lock)) {
5763                         /*
5764                          * Missed the hash lock.  Retry.
5765                          */
5766                         ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
5767                         mutex_exit(&dev->l2ad_mtx);
5768                         mutex_enter(hash_lock);
5769                         mutex_exit(hash_lock);
5770                         goto top;
5771                 }
5772 
5773                 if (HDR_L2_WRITE_HEAD(hdr)) {
5774                         /*
5775                          * We hit a write head node.  Leave it for
5776                          * l2arc_write_done().
5777                          */
5778                         list_remove(buflist, hdr);
5779                         mutex_exit(hash_lock);
5780                         continue;
5781                 }
5782 
5783                 if (!all && HDR_HAS_L2HDR(hdr) &&
5784                     (hdr->b_l2hdr.b_daddr > taddr ||
5785                     hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
5786                         /*
5787                          * We've evicted to the target address,
5788                          * or the end of the device.
5789                          */
5790                         mutex_exit(hash_lock);
5791                         break;
5792                 }
5793 
5794                 ASSERT(HDR_HAS_L2HDR(hdr));
5795                 if (!HDR_HAS_L1HDR(hdr)) {
5796                         ASSERT(!HDR_L2_READING(hdr));
5797                         /*
5798                          * This doesn't exist in the ARC.  Destroy.
5799                          * arc_hdr_destroy() will call list_remove()
5800                          * and decrement arcstat_l2_size.
5801                          */
5802                         arc_change_state(arc_anon, hdr, hash_lock);
5803                         arc_hdr_destroy(hdr);
5804                 } else {
5805                         ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
5806                         ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
5807                         /*
5808                          * Invalidate issued or about to be issued
5809                          * reads, since we may be about to write
5810                          * over this location.
5811                          */
5812                         if (HDR_L2_READING(hdr)) {
5813                                 ARCSTAT_BUMP(arcstat_l2_evict_reading);
5814                                 hdr->b_flags |= ARC_FLAG_L2_EVICTED;
5815                         }
5816 
5817                         /* Ensure this header has finished being written */
5818                         ASSERT(!HDR_L2_WRITING(hdr));
5819                         ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
5820 
5821                         arc_hdr_l2hdr_destroy(hdr);
5822                 }
5823                 mutex_exit(hash_lock);
5824         }
5825         mutex_exit(&dev->l2ad_mtx);
5826 }
5827 
5828 /*
5829  * Find and write ARC buffers to the L2ARC device.
5830  *
5831  * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
5832  * for reading until they have completed writing.
5833  * The headroom_boost is an in-out parameter used to maintain headroom boost
5834  * state between calls to this function.
5835  *
5836  * Returns the number of bytes actually written (which may be smaller than
5837  * the delta by which the device hand has changed due to alignment).
5838  */
5839 static uint64_t
5840 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
5841     boolean_t *headroom_boost)
5842 {
5843         arc_buf_hdr_t *hdr, *hdr_prev, *head;
5844         uint64_t write_asize, write_psize, write_sz, headroom,
5845             buf_compress_minsz;
5846         void *buf_data;
5847         boolean_t full;
5848         l2arc_write_callback_t *cb;
5849         zio_t *pio, *wzio;
5850         uint64_t guid = spa_load_guid(spa);
5851         const boolean_t do_headroom_boost = *headroom_boost;
5852 
5853         ASSERT(dev->l2ad_vdev != NULL);
5854 
5855         /* Lower the flag now, we might want to raise it again later. */
5856         *headroom_boost = B_FALSE;
5857 
5858         pio = NULL;
5859         write_sz = write_asize = write_psize = 0;
5860         full = B_FALSE;
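             /*
              * Allocate a sentinel "write head" header; it is inserted at the
              * front of the device buflist so l2arc_write_done() can tell
              * where this pass's buffers begin.
              */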
5861         head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
5862         head->b_flags |= ARC_FLAG_L2_WRITE_HEAD;
5863         head->b_flags |= ARC_FLAG_HAS_L2HDR;
5864 
5865         /*
5866          * We will want to try to compress buffers that are at least 2x the
5867          * device sector size.
5868          */
5869         buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
5870 
5871         /*
5872          * Copy buffers for L2ARC writing.
5873          */
5874         for (int try = 0; try <= 3; try++) {
5875                 multilist_sublist_t *mls = l2arc_sublist_lock(try);
5876                 uint64_t passed_sz = 0;
5877 
5878                 /*
5879                  * L2ARC fast warmup.
5880                  *
5881                  * Until the ARC is warm and starts to evict, read from the
5882                  * head of the ARC lists rather than the tail.
5883                  */
5884                 if (arc_warm == B_FALSE)
5885                         hdr = multilist_sublist_head(mls);
5886                 else
5887                         hdr = multilist_sublist_tail(mls);
5888 
5889                 headroom = target_sz * l2arc_headroom;
5890                 if (do_headroom_boost)
5891                         headroom = (headroom * l2arc_headroom_boost) / 100;
5892 
5893                 for (; hdr; hdr = hdr_prev) {
5894                         kmutex_t *hash_lock;
5895                         uint64_t buf_sz;
5896 
5897                         if (arc_warm == B_FALSE)
5898                                 hdr_prev = multilist_sublist_next(mls, hdr);
5899                         else
5900                                 hdr_prev = multilist_sublist_prev(mls, hdr);
5901 
5902                         hash_lock = HDR_LOCK(hdr);
5903                         if (!mutex_tryenter(hash_lock)) {
5904                                 /*
5905                                  * Skip this buffer rather than waiting.
5906                                  */
5907                                 continue;
5908                         }
5909 
5910                         passed_sz += hdr->b_size;
5911                         if (passed_sz > headroom) {
5912                                 /*
5913                                  * Searched too far.
5914                                  */
5915                                 mutex_exit(hash_lock);
5916                                 break;
5917                         }
5918 
5919                         if (!l2arc_write_eligible(guid, hdr)) {
5920                                 mutex_exit(hash_lock);
5921                                 continue;
5922                         }
5923 
5924                         if ((write_sz + hdr->b_size) > target_sz) {
5925                                 full = B_TRUE;
5926                                 mutex_exit(hash_lock);
5927                                 break;
5928                         }
5929 
5930                         if (pio == NULL) {
5931                                 /*
5932                                  * Insert a dummy header on the buflist so
5933                                  * l2arc_write_done() can find where the
5934                                  * write buffers begin without searching.
5935                                  */
5936                                 mutex_enter(&dev->l2ad_mtx);
5937                                 list_insert_head(&dev->l2ad_buflist, head);
5938                                 mutex_exit(&dev->l2ad_mtx);
5939 
5940                                 cb = kmem_alloc(
5941                                     sizeof (l2arc_write_callback_t), KM_SLEEP);
5942                                 cb->l2wcb_dev = dev;
5943                                 cb->l2wcb_head = head;
5944                                 pio = zio_root(spa, l2arc_write_done, cb,
5945                                     ZIO_FLAG_CANFAIL);
5946                         }
5947 
5948                         /*
5949                          * Create and add a new L2ARC header.
5950                          */
5951                         hdr->b_l2hdr.b_dev = dev;
5952                         hdr->b_flags |= ARC_FLAG_L2_WRITING;
5953                         /*
5954                          * Temporarily stash the data buffer in b_tmp_cdata.
5955                          * The subsequent write step will pick it up from
5956                          * there. This is because we can't access b_l1hdr.b_buf
5957                          * without holding the hash_lock, which we in turn
5958                          * can't access without holding the ARC list locks
5959                          * (which we want to avoid during compression/writing).
5960                          */
5961                         hdr->b_l2hdr.b_compress = ZIO_COMPRESS_OFF;
5962                         hdr->b_l2hdr.b_asize = hdr->b_size;
5963                         hdr->b_l1hdr.b_tmp_cdata = hdr->b_l1hdr.b_buf->b_data;
5964 
5965                         /*
5966                          * Explicitly set the b_daddr field to a known
5967                          * value which means "invalid address". This
5968                          * enables us to differentiate which stage of
5969                          * l2arc_write_buffers() the particular header
5970                          * is in (e.g. this loop, or the one below).
5971                          * ARC_FLAG_L2_WRITING is not enough to make
5972                          * this distinction, and we need to know in
5973                          * order to do proper l2arc vdev accounting in
5974                          * arc_release() and arc_hdr_destroy().
5975                          *
5976                          * Note, we can't use a new flag to distinguish
5977                          * the two stages because we don't hold the
5978                          * header's hash_lock below, in the second stage
5979                          * of this function. Thus, we can't simply
5980                          * change the b_flags field to denote that the
5981                          * IO has been sent. We can change the b_daddr
5982                          * field of the L2 portion, though, since we'll
5983                          * be holding the l2ad_mtx; which is why we're
5984                          * using it to denote the header's state change.
5985                          */
5986                         hdr->b_l2hdr.b_daddr = L2ARC_ADDR_UNSET;
5987 
5988                         buf_sz = hdr->b_size;
5989                         hdr->b_flags |= ARC_FLAG_HAS_L2HDR;
5990 
5991                         mutex_enter(&dev->l2ad_mtx);
5992                         list_insert_head(&dev->l2ad_buflist, hdr);
5993                         mutex_exit(&dev->l2ad_mtx);
5994 
5995                         /*
5996                          * Compute and store the buffer cksum before
5997                          * writing.  On debug the cksum is verified first.
5998                          */
5999                         arc_cksum_verify(hdr->b_l1hdr.b_buf);
6000                         arc_cksum_compute(hdr->b_l1hdr.b_buf, B_TRUE);
6001 
6002                         mutex_exit(hash_lock);
6003 
6004                         write_sz += buf_sz;
6005                 }
6006 
6007                 multilist_sublist_unlock(mls);
6008 
6009                 if (full == B_TRUE)
6010                         break;
6011         }
6012 
6013         /* No buffers selected for writing? */
6014         if (pio == NULL) {
6015                 ASSERT0(write_sz);
6016                 ASSERT(!HDR_HAS_L1HDR(head));
6017                 kmem_cache_free(hdr_l2only_cache, head);
6018                 return (0);
6019         }
6020 
6021         mutex_enter(&dev->l2ad_mtx);
6022 
6023         /*
6024          * Now start writing the buffers, beginning at the write head and
6025          * working backwards, retracing the course of the buffer selector
6026          * loop above.
6027          */
6028         for (hdr = list_prev(&dev->l2ad_buflist, head); hdr;
6029             hdr = list_prev(&dev->l2ad_buflist, hdr)) {
6030                 uint64_t buf_sz;
6031 
6032                 /*
6033                  * We rely on the L1 portion of the header below, so
6034                  * it's invalid for this header to have been evicted out
6035                  * of the ghost cache, prior to being written out. The
6036                  * ARC_FLAG_L2_WRITING bit ensures this won't happen.
6037                  */
6038                 ASSERT(HDR_HAS_L1HDR(hdr));
6039 
6040                 /*
6041                  * We shouldn't need to lock the buffer here, since we flagged
6042                  * it as ARC_FLAG_L2_WRITING in the previous step, but we must
6043                  * take care to only access its L2 cache parameters. In
6044                  * particular, hdr->b_l1hdr.b_buf may be invalid by now due to
6045                  * ARC eviction.
6046                  */
6047                 hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
6048 
6049                 if ((HDR_L2COMPRESS(hdr)) &&
6050                     hdr->b_l2hdr.b_asize >= buf_compress_minsz) {
6051                         if (l2arc_compress_buf(hdr)) {
6052                                 /*
6053                                  * If compression succeeded, enable headroom
6054                                  * boost on the next scan cycle.
6055                                  */
6056                                 *headroom_boost = B_TRUE;
6057                         }
6058                 }
6059 
6060                 /*
6061                  * Pick up the buffer data we had previously stashed away
6062                  * (and now potentially also compressed).
6063                  */
6064                 buf_data = hdr->b_l1hdr.b_tmp_cdata;
6065                 buf_sz = hdr->b_l2hdr.b_asize;
6066 
6067                 /*
6068                  * We need to do this regardless of whether buf_sz is
6069                  * zero; otherwise, when this l2hdr is evicted we'll
6070                  * remove a reference that was never added.
6071                  */
6072                 (void) refcount_add_many(&dev->l2ad_alloc, buf_sz, hdr);
6073 
6074                 /* Compression may have squashed the buffer to zero length. */
6075                 if (buf_sz != 0) {
6076                         uint64_t buf_p_sz;
6077 
6078                         wzio = zio_write_phys(pio, dev->l2ad_vdev,
6079                             dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
6080                             NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
6081                             ZIO_FLAG_CANFAIL, B_FALSE);
6082 
6083                         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
6084                             zio_t *, wzio);
6085                         (void) zio_nowait(wzio);
6086 
6087                         write_asize += buf_sz;
6088 
6089                         /*
6090                          * Keep the clock hand suitably device-aligned.
6091                          */
6092                         buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
6093                         write_psize += buf_p_sz;
6094                         dev->l2ad_hand += buf_p_sz;
6095                 }
6096         }
6097 
6098         mutex_exit(&dev->l2ad_mtx);
6099 
6100         ASSERT3U(write_asize, <=, target_sz);
6101         ARCSTAT_BUMP(arcstat_l2_writes_sent);
6102         ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
6103         ARCSTAT_INCR(arcstat_l2_size, write_sz);
6104         ARCSTAT_INCR(arcstat_l2_asize, write_asize);
6105         vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);
6106 
6107         /*
6108          * Bump device hand to the device start if it is approaching the end.
6109          * l2arc_evict() will already have evicted ahead for this case.
6110          */
6111         if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
6112                 dev->l2ad_hand = dev->l2ad_start;
6113                 dev->l2ad_first = B_FALSE;
6114         }
6115 
6116         dev->l2ad_writing = B_TRUE;
6117         (void) zio_wait(pio);
6118         dev->l2ad_writing = B_FALSE;
6119 
6120         return (write_asize);
6121 }
6122 
6123 /*
6124  * Compresses an L2ARC buffer.
6125  * The data to be compressed must be prefilled in b_l1hdr.b_tmp_cdata and its
6126  * size in l2hdr->b_asize. This routine tries to compress the data and
6127  * depending on the compression result there are three possible outcomes:
6128  * *) The buffer was incompressible. The original l2hdr contents were left
6129  *    untouched and are ready for writing to an L2 device.
6130  * *) The buffer was all-zeros, so there is no need to write it to an L2
6131  *    device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
6132  *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
6133  * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
6134  *    data buffer which holds the compressed data to be written, and b_asize
6135  *    tells us how much data there is. b_compress is set to the appropriate
6136  *    compression algorithm. Once writing is done, invoke
6137  *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
6138  *
6139  * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
6140  * buffer was incompressible).
6141  */
6142 static boolean_t
6143 l2arc_compress_buf(arc_buf_hdr_t *hdr)
6144 {
6145         void *cdata;
6146         size_t csize, len, rounded;
6147         l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
6148 
6149         ASSERT(HDR_HAS_L2HDR(hdr));
6150         ASSERT(HDR_HAS_L1HDR(hdr));
6151         ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
6152         ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
6153 
6154         len = l2hdr->b_asize;
6155         cdata = zio_data_buf_alloc(len);
6156         ASSERT3P(cdata, !=, NULL);
6157         csize = zio_compress_data(ZIO_COMPRESS_LZ4, hdr->b_l1hdr.b_tmp_cdata,
6158             cdata, l2hdr->b_asize);
6159 
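             /*
              * Pad the compressed size up to a multiple of SPA_MINBLOCKSIZE
              * and zero the slack so that only initialized data is written
              * out to the cache device.
              */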
6160         rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
6161         if (rounded > csize) {
6162                 bzero((char *)cdata + csize, rounded - csize);
6163                 csize = rounded;
6164         }
6165 
6166         if (csize == 0) {
6167                 /* zero block, indicate that there's nothing to write */
6168                 zio_data_buf_free(cdata, len);
6169                 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
6170                 l2hdr->b_asize = 0;
6171                 hdr->b_l1hdr.b_tmp_cdata = NULL;
6172                 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
6173                 return (B_TRUE);
6174         } else if (csize > 0 && csize < len) {
6175                 /*
6176                  * Compression succeeded, we'll keep the cdata around for
6177                  * writing and release it afterwards.
6178                  */
6179                 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
6180                 l2hdr->b_asize = csize;
6181                 hdr->b_l1hdr.b_tmp_cdata = cdata;
6182                 ARCSTAT_BUMP(arcstat_l2_compress_successes);
6183                 return (B_TRUE);
6184         } else {
6185                 /*
6186                  * Compression failed, release the compressed buffer.
6187                  * l2hdr will be left unmodified.
6188                  */
6189                 zio_data_buf_free(cdata, len);
6190                 ARCSTAT_BUMP(arcstat_l2_compress_failures);
6191                 return (B_FALSE);
6192         }
6193 }
6194 
6195 /*
6196  * Decompresses a zio read back from an l2arc device. On success, the
6197  * underlying zio's io_data buffer is overwritten by the uncompressed
6198  * version. On decompression error (corrupt compressed stream), the
6199  * zio->io_error value is set to signal an I/O error.
6200  *
6201  * Please note that the compressed data stream is not checksummed, so
6202  * if the underlying device is experiencing data corruption, we may feed
6203  * corrupt data to the decompressor, so the decompressor needs to be
6204  * able to handle this situation (LZ4 does).
6205  */
6206 static void
6207 l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
6208 {
6209         ASSERT(L2ARC_IS_VALID_COMPRESS(c));
6210 
6211         if (zio->io_error != 0) {
6212                 /*
6213                  * An i/o error has occurred; just restore the original i/o
6214                  * size in preparation for a main pool read.
6215                  */
6216                 zio->io_orig_size = zio->io_size = hdr->b_size;
6217                 return;
6218         }
6219 
6220         if (c == ZIO_COMPRESS_EMPTY) {
6221                 /*
6222                  * An empty buffer results in a null zio, which means we
6223                  * need to fill its io_data after we're done restoring the
6224                  * buffer's contents.
6225                  */
6226                 ASSERT(hdr->b_l1hdr.b_buf != NULL);
6227                 bzero(hdr->b_l1hdr.b_buf->b_data, hdr->b_size);
6228                 zio->io_data = zio->io_orig_data = hdr->b_l1hdr.b_buf->b_data;
6229         } else {
6230                 ASSERT(zio->io_data != NULL);
6231                 /*
6232                  * We copy the compressed data from the start of the arc buffer
6233                  * (the zio_read will have pulled in only what we need, the
6234                  * rest is garbage which we will overwrite at decompression)
6235                  * and then decompress back to the ARC data buffer. This way we
6236                  * can minimize copying by simply decompressing back over the
6237                  * original compressed data (rather than decompressing to an
6238                  * aux buffer and then copying back the uncompressed buffer,
6239                  * which is likely to be much larger).
6240                  */
6241                 uint64_t csize;
6242                 void *cdata;
6243 
6244                 csize = zio->io_size;
6245                 cdata = zio_data_buf_alloc(csize);
6246                 bcopy(zio->io_data, cdata, csize);
6247                 if (zio_decompress_data(c, cdata, zio->io_data, csize,
6248                     hdr->b_size) != 0)
6249                         zio->io_error = EIO;
6250                 zio_data_buf_free(cdata, csize);
6251         }
6252 
6253         /* Restore the expected uncompressed IO size. */
6254         zio->io_orig_size = zio->io_size = hdr->b_size;
6255 }
6256 
6257 /*
6258  * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
6259  * This buffer serves as a temporary holder of compressed data while
6260  * the buffer entry is being written to an l2arc device. Once that is
6261  * done, we can dispose of it.
6262  */
6263 static void
6264 l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
6265 {
6266         ASSERT(HDR_HAS_L2HDR(hdr));
6267         enum zio_compress comp = hdr->b_l2hdr.b_compress;
6268 
6269         ASSERT(HDR_HAS_L1HDR(hdr));
6270         ASSERT(comp == ZIO_COMPRESS_OFF || L2ARC_IS_VALID_COMPRESS(comp));
6271 
6272         if (comp == ZIO_COMPRESS_OFF) {
6273                 /*
6274                  * In this case, b_tmp_cdata points to the same buffer
6275                  * as the arc_buf_t's b_data field. We don't want to
6276                  * free it, since the arc_buf_t will handle that.
6277                  */
6278                 hdr->b_l1hdr.b_tmp_cdata = NULL;
6279         } else if (comp == ZIO_COMPRESS_EMPTY) {
6280                 /*
6281                  * In this case, b_tmp_cdata was compressed to an empty
6282                  * buffer, thus there's nothing to free and b_tmp_cdata
6283                  * should have been set to NULL in l2arc_compress_buf().
6284                  */
6285                 ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
6286         } else {
6287                 /*
6288                  * If the data was compressed, then we've allocated a
6289                  * temporary buffer for it, so now we need to release it.
6290                  */
6291                 ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
6292                 zio_data_buf_free(hdr->b_l1hdr.b_tmp_cdata,
6293                     hdr->b_size);
6294                 hdr->b_l1hdr.b_tmp_cdata = NULL;
6295         }
6297 }
6298 
6299 /*
6300  * This thread feeds the L2ARC at regular intervals.  This is the beating
6301  * heart of the L2ARC.
6302  */
6303 static void
6304 l2arc_feed_thread(void)
6305 {
6306         callb_cpr_t cpr;
6307         l2arc_dev_t *dev;
6308         spa_t *spa;
6309         uint64_t size, wrote;
6310         clock_t begin, next = ddi_get_lbolt();
6311         boolean_t headroom_boost = B_FALSE;
6312 
6313         CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
6314 
6315         mutex_enter(&l2arc_feed_thr_lock);
6316 
6317         while (l2arc_thread_exit == 0) {
6318                 CALLB_CPR_SAFE_BEGIN(&cpr);
6319                 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
6320                     next);
6321                 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
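                     /*
                      * By default poll again in one second; a successful
                      * write pass recomputes this below via
                      * l2arc_write_interval().
                      */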
6322                 next = ddi_get_lbolt() + hz;
6323 
6324                 /*
6325                  * Quick check for L2ARC devices.
6326                  */
6327                 mutex_enter(&l2arc_dev_mtx);
6328                 if (l2arc_ndev == 0) {
6329                         mutex_exit(&l2arc_dev_mtx);
6330                         continue;
6331                 }
6332                 mutex_exit(&l2arc_dev_mtx);
6333                 begin = ddi_get_lbolt();
6334 
6335                 /*
6336                  * This selects the next l2arc device to write to, and in
6337                  * doing so the next spa to feed from: dev->l2ad_spa.   This
6338                  * will return NULL if there are now no l2arc devices or if
6339                  * they are all faulted.
6340                  *
6341                  * If a device is returned, its spa's config lock is also
6342                  * held to prevent device removal.  l2arc_dev_get_next()
6343                  * will grab and release l2arc_dev_mtx.
6344                  */
6345                 if ((dev = l2arc_dev_get_next()) == NULL)
6346                         continue;
6347 
6348                 spa = dev->l2ad_spa;
6349                 ASSERT(spa != NULL);
6350 
6351                 /*
6352                  * If the pool is read-only then force the feed thread to
6353                  * sleep a little longer.
6354                  */
6355                 if (!spa_writeable(spa)) {
6356                         next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
6357                         spa_config_exit(spa, SCL_L2ARC, dev);
6358                         continue;
6359                 }
6360 
6361                 /*
6362                  * Avoid contributing to memory pressure.
6363                  */
6364                 if (arc_reclaim_needed()) {
6365                         ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
6366                         spa_config_exit(spa, SCL_L2ARC, dev);
6367                         continue;
6368                 }
6369 
6370                 ARCSTAT_BUMP(arcstat_l2_feeds);
6371 
6372                 size = l2arc_write_size();
6373 
6374                 /*
6375                  * Evict L2ARC buffers that will be overwritten.
6376                  */
6377                 l2arc_evict(dev, size, B_FALSE);
6378 
6379                 /*
6380                  * Write ARC buffers.
6381                  */
6382                 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
6383 
6384                 /*
6385                  * Calculate interval between writes.
6386                  */
6387                 next = l2arc_write_interval(begin, size, wrote);
6388                 spa_config_exit(spa, SCL_L2ARC, dev);
6389         }
6390 
6391         l2arc_thread_exit = 0;
6392         cv_broadcast(&l2arc_feed_thr_cv);
6393         CALLB_CPR_EXIT(&cpr);               /* drops l2arc_feed_thr_lock */
6394         thread_exit();
6395 }
6396 
6397 boolean_t
6398 l2arc_vdev_present(vdev_t *vd)
6399 {
6400         l2arc_dev_t *dev;
6401 
6402         mutex_enter(&l2arc_dev_mtx);
6403         for (dev = list_head(l2arc_dev_list); dev != NULL;
6404             dev = list_next(l2arc_dev_list, dev)) {
6405                 if (dev->l2ad_vdev == vd)
6406                         break;
6407         }
6408         mutex_exit(&l2arc_dev_mtx);
6409 
6410         return (dev != NULL);
6411 }
6412 
6413 /*
6414  * Add a vdev for use by the L2ARC.  By this point the spa has already
6415  * validated the vdev and opened it.
6416  */
6417 void
6418 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
6419 {
6420         l2arc_dev_t *adddev;
6421 
6422         ASSERT(!l2arc_vdev_present(vd));
6423 
6424         /*
6425          * Create a new l2arc device entry.
6426          */
6427         adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
6428         adddev->l2ad_spa = spa;
6429         adddev->l2ad_vdev = vd;
6430         adddev->l2ad_start = VDEV_LABEL_START_SIZE;
6431         adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
6432         adddev->l2ad_hand = adddev->l2ad_start;
6433         adddev->l2ad_first = B_TRUE;
6434         adddev->l2ad_writing = B_FALSE;
6435 
6436         mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
6437         /*
6438          * This is a list of all ARC buffers that are still valid on the
6439          * device.
6440          */
6441         list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
6442             offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
6443 
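             /*
              * Account for the device's usable capacity and initialize
              * tracking of the bytes allocated on it.
              */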
6444         vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
6445         refcount_create(&adddev->l2ad_alloc);
6446 
6447         /*
6448          * Add device to global list
6449          */
6450         mutex_enter(&l2arc_dev_mtx);
6451         list_insert_head(l2arc_dev_list, adddev);
6452         atomic_inc_64(&l2arc_ndev);
6453         mutex_exit(&l2arc_dev_mtx);
6454 }
6455 
6456 /*
6457  * Remove a vdev from the L2ARC.
6458  */
6459 void
6460 l2arc_remove_vdev(vdev_t *vd)
6461 {
6462         l2arc_dev_t *dev, *nextdev, *remdev = NULL;
6463 
6464         /*
6465          * Find the device by vdev
6466          */
6467         mutex_enter(&l2arc_dev_mtx);
6468         for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
6469                 nextdev = list_next(l2arc_dev_list, dev);
6470                 if (vd == dev->l2ad_vdev) {
6471                         remdev = dev;
6472                         break;
6473                 }
6474         }
6475         ASSERT(remdev != NULL);
6476 
6477         /*
6478          * Remove device from global list
6479          */
6480         list_remove(l2arc_dev_list, remdev);
6481         l2arc_dev_last = NULL;          /* may have been invalidated */
6482         atomic_dec_64(&l2arc_ndev);
6483         mutex_exit(&l2arc_dev_mtx);
6484 
6485         /*
6486          * Clear all buflists and ARC references.  L2ARC device flush.
6487          */
6488         l2arc_evict(remdev, 0, B_TRUE);
6489         list_destroy(&remdev->l2ad_buflist);
6490         mutex_destroy(&remdev->l2ad_mtx);
6491         refcount_destroy(&remdev->l2ad_alloc);
6492         kmem_free(remdev, sizeof (l2arc_dev_t));
6493 }
6494 
6495 void
6496 l2arc_init(void)
6497 {
6498         l2arc_thread_exit = 0;
6499         l2arc_ndev = 0;
6500         l2arc_writes_sent = 0;
6501         l2arc_writes_done = 0;
6502 
6503         mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
6504         cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
6505         mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
6506         mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
6507 
6508         l2arc_dev_list = &L2ARC_dev_list;
6509         l2arc_free_on_write = &L2ARC_free_on_write;
6510         list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
6511             offsetof(l2arc_dev_t, l2ad_node));
6512         list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
6513             offsetof(l2arc_data_free_t, l2df_list_node));
6514 }
6515 
6516 void
6517 l2arc_fini(void)
6518 {
6519         /*
6520          * This is called from dmu_fini(), which is called from spa_fini().
6521          * Because of this, we can assume that all l2arc devices have
6522          * already been removed when the pools themselves were removed.
6523          */
6524 
6525         l2arc_do_free_on_write();
6526 
6527         mutex_destroy(&l2arc_feed_thr_lock);
6528         cv_destroy(&l2arc_feed_thr_cv);
6529         mutex_destroy(&l2arc_dev_mtx);
6530         mutex_destroy(&l2arc_free_on_write_mtx);
6531 
6532         list_destroy(l2arc_dev_list);
6533         list_destroy(l2arc_free_on_write);
6534 }
6535 
6536 void
6537 l2arc_start(void)
6538 {
6539         if (!(spa_mode_global & FWRITE))
6540                 return;
6541 
6542         (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
6543             TS_RUN, minclsyspri);
6544 }
6545 
6546 void
6547 l2arc_stop(void)
6548 {
6549         if (!(spa_mode_global & FWRITE))
6550                 return;
6551 
6552         mutex_enter(&l2arc_feed_thr_lock);
6553         cv_signal(&l2arc_feed_thr_cv);      /* kick thread out of startup */
6554         l2arc_thread_exit = 1;
6555         while (l2arc_thread_exit != 0)
6556                 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
6557         mutex_exit(&l2arc_feed_thr_lock);
6558 }