1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  24  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  25  * Copyright (c) 2013 by Delphix. All rights reserved.
  26  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  27  */
  28 
  29 /*
  30  * DVA-based Adjustable Replacement Cache
  31  *
  32  * While much of the theory of operation used here is
  33  * based on the self-tuning, low overhead replacement cache
  34  * presented by Megiddo and Modha at FAST 2003, there are some
  35  * significant differences:
  36  *
  37  * 1. The Megiddo and Modha model assumes any page is evictable.
  38  * Pages in its cache cannot be "locked" into memory.  This makes
  39  * the eviction algorithm simple: evict the last page in the list.
  40  * This also makes the performance characteristics easy to reason
  41  * about.  Our cache is not so simple.  At any given moment, some
  42  * subset of the blocks in the cache are un-evictable because we
  43  * have handed out a reference to them.  Blocks are only evictable
  44  * when there are no external references active.  This makes
  45  * eviction far more problematic:  we choose to evict the evictable
  46  * blocks that are the "lowest" in the list.
  47  *
  48  * There are times when it is not possible to evict the requested
  49  * space.  In these circumstances we are unable to adjust the cache
  50  * size.  To prevent the cache from growing unbounded at these times, we
  51  * implement a "cache throttle" that slows the flow of new data
  52  * into the cache until we can make space available.
  53  *
  54  * 2. The Megiddo and Modha model assumes a fixed cache size.
  55  * Pages are evicted when the cache is full and there is a cache
  56  * miss.  Our model has a variable sized cache.  It grows with
  57  * high use, but also tries to react to memory pressure from the
  58  * operating system: decreasing its size when system memory is
  59  * tight.
  60  *
  61  * 3. The Megiddo and Modha model assumes a fixed page size. All
  62  * elements of the cache are therefore exactly the same size.  So
  63  * when adjusting the cache size following a cache miss, it's simply
  64  * a matter of choosing a single page to evict.  In our model, we
  65  * have variable sized cache blocks (ranging from 512 bytes to
  66  * 128K bytes).  We therefore choose a set of blocks to evict to make
  67  * space for a cache miss that approximates as closely as possible
  68  * the space used by the new block.
  69  *
  70  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
  71  * by N. Megiddo & D. Modha, FAST 2003
  72  */
  73 
  74 /*
  75  * External users typically access ARC buffers via a hash table
  76  * lookup, using the DVA, spa_t pointer value and the birth TXG
  77  * number as the key. The hash value is derived by buf_hash(),
  78  * which spits out a 64-bit hash index. This index is then masked
  79  * with ht_mask to obtain the final index into the hash table:
  80  *
  81  *                     ,---------------- & ht_mask ----------------,
  82  * 64-bit hash value   |             (hash table index)             |
  83  * |XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
  84  *
  85  * Sizing of the hash table is done at boot from the amount of
  86  * physical memory. We start with a base value of 2^12 hash
  87  * buckets and then evaluate whether this number, multiplied by
  88  * 2^zfs_arc_ht_base_masklen (the minimum mask length), is
  89  * greater than or equal to the amount of physical memory. If not,
  90  * we double the number of hash buckets and repeat. Using the
  91  * default settings these values translate to ~1 MB of hash tables
  92  * for each 1 GB of physical memory.
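      *
      * For example (an illustrative walk-through of the sizing loop, not
      * additional code): with 8 GB of physical memory and the default
      * zfs_arc_ht_base_masklen of 13, we start at 2^12 buckets;
      * 2^12 * 2^13 = 32 MB is less than 8 GB, so the bucket count is
      * doubled until 2^20 * 2^13 = 8 GB, giving 2^20 hash buckets, i.e.
      * 8 MB of bucket pointers (at 8 bytes each) for 8 GB of memory.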
  93  *
  94  * The locking model:
  95  *
  96  * A new reference to a cache buffer can be obtained in two
  97  * ways: 1) via a hash table lookup using the DVA as a key,
  98  * or 2) via one of the ARC lists.  The arc_read() interface
  99  * uses method 1, while the internal arc algorithms for
 100  * adjusting the cache use method 2.  We therefore provide two
 101  * types of locks: 1) the hash table lock array, and 2) the
 102  * arc list locks.
 103  *
 104  * Buffers do not have their own mutexes, rather they rely on the
 105  * hash table mutexes for the bulk of their protection (i.e. most
 106  * fields in the arc_buf_hdr_t are protected by these mutexes). The
 107  * specific mutex is selected by taking its hash value and masking
 108  * it by ht_lock_mask, which then produces an index into the mutex
 109  * table. The size of the lock table is derived from the amount of
 110  * physical memory, which is simply divided by
 111  * 2^zfs_arc_ht_lock_shift, giving the number of locks, with a
 112  * minimum of MIN_BUF_LOCKS.
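      *
      * For example (illustrative): with the default zfs_arc_ht_lock_shift
      * of 32 this works out to one lock per 4 GB of physical memory, so
      * machines with less than 1 TB of memory end up at the MIN_BUF_LOCKS
      * (256) floor.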
 113  *
 114  * buf_hash_find() returns the appropriate mutex (held) when it
 115  * locates the requested buffer in the hash table.  It returns
 116  * NULL for the mutex if the buffer was not in the table.
 117  *
 118  * buf_hash_remove() expects the appropriate hash mutex to be
 119  * already held before it is invoked.
 120  *
 121  * Each arc state also has a mutex which is used to protect the
 122  * buffer list associated with the state.  When attempting to
 123  * obtain a hash table lock while holding an arc list lock you
 124  * must use mutex_tryenter() to avoid deadlock.  Also note that
 125  * the active state mutex must be held before the ghost state mutex.
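      *
      * An illustrative sketch (not actual ARC code) of that ordering:
      *
      *      mutex_enter(&state->arcs_mtx);
      *      if (mutex_tryenter(hash_lock)) {
      *              (operate on the buffer)
      *              mutex_exit(hash_lock);
      *      } else {
      *              ARCSTAT_BUMP(arcstat_mutex_miss);  (skip this buffer)
      *      }
      *      mutex_exit(&state->arcs_mtx);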
 126  *
 127  * Arc buffers may have an associated eviction callback function.
 128  * This function will be invoked prior to removing the buffer (e.g.
 129  * in arc_do_user_evicts()).  Note however that the data associated
 130  * with the buffer may be evicted prior to the callback.  The callback
 131  * must be made with *no locks held* (to prevent deadlock).  Additionally,
 132  * the users of callbacks must ensure that their private data is
 133  * protected from simultaneous callbacks from arc_buf_evict()
 134  * and arc_do_user_evicts().
 135  *
 136  * Note that the majority of the performance stats are manipulated
 137  * with atomic operations.
 138  *
 139  * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 140  *
 141  *      - L2ARC buflist creation
 142  *      - L2ARC buflist eviction
 143  *      - L2ARC write completion, which walks L2ARC buflists
 144  *      - ARC header destruction, as it removes from L2ARC buflists
 145  *      - ARC header release, as it removes from L2ARC buflists
 146  */
 147 
 148 #include <sys/spa.h>
 149 #include <sys/zio.h>
 150 #include <sys/zio_compress.h>
 151 #include <sys/zfs_context.h>
 152 #include <sys/arc.h>
 153 #include <sys/refcount.h>
 154 #include <sys/vdev.h>
 155 #include <sys/vdev_impl.h>
 156 #include <sys/dsl_pool.h>
 157 #ifdef _KERNEL
 158 #include <sys/vmsystm.h>
 159 #include <vm/anon.h>
 160 #include <sys/fs/swapnode.h>
 161 #include <sys/dnlc.h>
 162 #endif
 163 #include <sys/callb.h>
 164 #include <sys/kstat.h>
 165 #include <zfs_fletcher.h>
 166 
 167 #ifndef _KERNEL
 168 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
 169 boolean_t arc_watch = B_FALSE;
 170 int arc_procfd;
 171 #endif
 172 
 173 static kmutex_t         arc_reclaim_thr_lock;
 174 static kcondvar_t       arc_reclaim_thr_cv;     /* used to signal reclaim thr */
 175 static uint8_t          arc_thread_exit;
 176 
 177 #define ARC_REDUCE_DNLC_PERCENT 3
 178 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
 179 
 180 typedef enum arc_reclaim_strategy {
 181         ARC_RECLAIM_AGGR,               /* Aggressive reclaim strategy */
 182         ARC_RECLAIM_CONS                /* Conservative reclaim strategy */
 183 } arc_reclaim_strategy_t;
 184 
 185 /*
 186  * The number of iterations through arc_evict_*() before we
 187  * drop & reacquire the lock.
 188  */
 189 int arc_evict_iterations = 100;
 190 
 191 /* number of seconds before growing cache again */
 192 static int              arc_grow_retry = 60;
 193 
 194 /* shift of arc_c for calculating both min and max arc_p */
 195 static int              arc_p_min_shift = 4;
 196 
 197 /* log2(fraction of arc to reclaim) */
 198 static int              arc_shrink_shift = 5;
 199 
 200 /*
 201  * minimum lifespan of a prefetch block in clock ticks
 202  * (initialized in arc_init())
 203  */
 204 static int              arc_min_prefetch_lifespan;
 205 
 206 /*
 207  * If this percent of memory is free, don't throttle.
 208  */
 209 int arc_lotsfree_percent = 10;
 210 
 211 static int arc_dead;
 212 
 213 /*
 214  * The arc has filled available memory and has now warmed up.
 215  */
 216 static boolean_t arc_warm;
 217 
 218 /*
 219  * These tunables are for performance analysis.
 220  */
 221 uint64_t zfs_arc_max;
 222 uint64_t zfs_arc_min;
 223 uint64_t zfs_arc_meta_limit = 0;
 224 int zfs_arc_grow_retry = 0;
 225 int zfs_arc_shrink_shift = 0;
 226 int zfs_arc_p_min_shift = 0;
 227 int zfs_disable_dup_eviction = 0;
 228 
 229 /*
 230  * Used to calculate the size of ARC hash tables and number of hash locks.
 231  * See big theory block comment at the start of this file.
 232  */
 233 uint64_t zfs_arc_ht_base_masklen = 13;
 234 /*
 235  * We want to allocate one hash lock for every 4GB of memory with a minimum
 236  * of MIN_BUF_LOCKS.
 237  */
 238 uint64_t zfs_arc_ht_lock_shift = 32;
 239 #define MIN_BUF_LOCKS   256
 240 
 241 /*
 242  * Note that buffers can be in one of 6 states:
 243  *      ARC_anon        - anonymous (discussed below)
 244  *      ARC_mru         - recently used, currently cached
 245  *      ARC_mru_ghost   - recently used, no longer in cache
 246  *      ARC_mfu         - frequently used, currently cached
 247  *      ARC_mfu_ghost   - frequently used, no longer in cache
 248  *      ARC_l2c_only    - exists in L2ARC but not other states
 249  * When there are no active references to a buffer, it is
 250  * linked onto a list in one of these arc states.  These are
 251  * the only buffers that can be evicted or deleted.  Within each
 252  * state there are multiple lists, one for meta-data and one for
 253  * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 254  * etc.) is tracked separately so that it can be managed more
 255  * explicitly: favored over data, limited explicitly.
 256  *
 257  * Anonymous buffers are buffers that are not associated with
 258  * a DVA.  These are buffers that hold dirty block copies
 259  * before they are written to stable storage.  By definition,
 260  * they are "ref'd" and are considered part of arc_mru
 261  * that cannot be freed.  Generally, they will acquire a DVA
 262  * as they are written and migrate onto the arc_mru list.
 263  *
 264  * The ARC_l2c_only state is for buffers that are in the second
 265  * level ARC but no longer in any of the ARC_m* lists.  The second
 266  * level ARC itself may also contain buffers that are in any of
 267  * the ARC_m* states - meaning that a buffer can exist in two
 268  * places.  The reason for the ARC_l2c_only state is to keep the
 269  * buffer header in the hash table, so that reads that hit the
 270  * second level ARC benefit from these fast lookups.
 271  */
 272 
 273 typedef struct arc_state {
 274         list_t  arcs_list[ARC_BUFC_NUMTYPES];   /* list of evictable buffers */
 275         uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
 276         uint64_t arcs_size;     /* total amount of data in this state */
 277         kmutex_t arcs_mtx;
 278 } arc_state_t;
 279 
 280 /* The 6 states: */
 281 static arc_state_t ARC_anon;
 282 static arc_state_t ARC_mru;
 283 static arc_state_t ARC_mru_ghost;
 284 static arc_state_t ARC_mfu;
 285 static arc_state_t ARC_mfu_ghost;
 286 static arc_state_t ARC_l2c_only;
 287 
 288 typedef struct arc_stats {
 289         kstat_named_t arcstat_hits;
 290         kstat_named_t arcstat_misses;
 291         kstat_named_t arcstat_demand_data_hits;
 292         kstat_named_t arcstat_demand_data_misses;
 293         kstat_named_t arcstat_demand_metadata_hits;
 294         kstat_named_t arcstat_demand_metadata_misses;
 295         kstat_named_t arcstat_prefetch_data_hits;
 296         kstat_named_t arcstat_prefetch_data_misses;
 297         kstat_named_t arcstat_prefetch_metadata_hits;
 298         kstat_named_t arcstat_prefetch_metadata_misses;
 299         kstat_named_t arcstat_mru_hits;
 300         kstat_named_t arcstat_mru_ghost_hits;
 301         kstat_named_t arcstat_mfu_hits;
 302         kstat_named_t arcstat_mfu_ghost_hits;
 303         kstat_named_t arcstat_deleted;
 304         kstat_named_t arcstat_recycle_miss;
 305         /*
 306          * Number of buffers that could not be evicted because the hash lock
 307          * was held by another thread.  The lock may not necessarily be held
 308          * by something using the same buffer, since hash locks are shared
 309          * by multiple buffers.
 310          */
 311         kstat_named_t arcstat_mutex_miss;
 312         /*
 313          * Number of buffers skipped because they have I/O in progress, are
 314  * indirect prefetch buffers that have not lived long enough, or are
 315          * not from the spa we're trying to evict from.
 316          */
 317         kstat_named_t arcstat_evict_skip;
 318         kstat_named_t arcstat_evict_l2_cached;
 319         kstat_named_t arcstat_evict_l2_eligible;
 320         kstat_named_t arcstat_evict_l2_ineligible;
 321         kstat_named_t arcstat_hash_elements;
 322         kstat_named_t arcstat_hash_elements_max;
 323         kstat_named_t arcstat_hash_collisions;
 324         kstat_named_t arcstat_hash_chains;
 325         kstat_named_t arcstat_hash_chain_max;
 326         kstat_named_t arcstat_p;
 327         kstat_named_t arcstat_c;
 328         kstat_named_t arcstat_c_min;
 329         kstat_named_t arcstat_c_max;
 330         kstat_named_t arcstat_size;
 331         kstat_named_t arcstat_hdr_size;
 332         kstat_named_t arcstat_data_size;
 333         kstat_named_t arcstat_other_size;
 334         kstat_named_t arcstat_l2_hits;
 335         kstat_named_t arcstat_l2_misses;
 336         kstat_named_t arcstat_l2_feeds;
 337         kstat_named_t arcstat_l2_rw_clash;
 338         kstat_named_t arcstat_l2_read_bytes;
 339         kstat_named_t arcstat_l2_write_bytes;
 340         kstat_named_t arcstat_l2_writes_sent;
 341         kstat_named_t arcstat_l2_writes_done;
 342         kstat_named_t arcstat_l2_writes_error;
 343         kstat_named_t arcstat_l2_writes_hdr_miss;
 344         kstat_named_t arcstat_l2_evict_lock_retry;
 345         kstat_named_t arcstat_l2_evict_reading;
 346         kstat_named_t arcstat_l2_free_on_write;
 347         kstat_named_t arcstat_l2_abort_lowmem;
 348         kstat_named_t arcstat_l2_cksum_bad;
 349         kstat_named_t arcstat_l2_io_error;
 350         kstat_named_t arcstat_l2_size;
 351         kstat_named_t arcstat_l2_asize;
 352         kstat_named_t arcstat_l2_hdr_size;
 353         kstat_named_t arcstat_l2_compress_successes;
 354         kstat_named_t arcstat_l2_compress_zeros;
 355         kstat_named_t arcstat_l2_compress_failures;
 356         kstat_named_t arcstat_memory_throttle_count;
 357         kstat_named_t arcstat_duplicate_buffers;
 358         kstat_named_t arcstat_duplicate_buffers_size;
 359         kstat_named_t arcstat_duplicate_reads;
 360         kstat_named_t arcstat_meta_used;
 361         kstat_named_t arcstat_meta_limit;
 362         kstat_named_t arcstat_meta_max;
 363 } arc_stats_t;
 364 
 365 static arc_stats_t arc_stats = {
 366         { "hits",                       KSTAT_DATA_UINT64 },
 367         { "misses",                     KSTAT_DATA_UINT64 },
 368         { "demand_data_hits",           KSTAT_DATA_UINT64 },
 369         { "demand_data_misses",         KSTAT_DATA_UINT64 },
 370         { "demand_metadata_hits",       KSTAT_DATA_UINT64 },
 371         { "demand_metadata_misses",     KSTAT_DATA_UINT64 },
 372         { "prefetch_data_hits",         KSTAT_DATA_UINT64 },
 373         { "prefetch_data_misses",       KSTAT_DATA_UINT64 },
 374         { "prefetch_metadata_hits",     KSTAT_DATA_UINT64 },
 375         { "prefetch_metadata_misses",   KSTAT_DATA_UINT64 },
 376         { "mru_hits",                   KSTAT_DATA_UINT64 },
 377         { "mru_ghost_hits",             KSTAT_DATA_UINT64 },
 378         { "mfu_hits",                   KSTAT_DATA_UINT64 },
 379         { "mfu_ghost_hits",             KSTAT_DATA_UINT64 },
 380         { "deleted",                    KSTAT_DATA_UINT64 },
 381         { "recycle_miss",               KSTAT_DATA_UINT64 },
 382         { "mutex_miss",                 KSTAT_DATA_UINT64 },
 383         { "evict_skip",                 KSTAT_DATA_UINT64 },
 384         { "evict_l2_cached",            KSTAT_DATA_UINT64 },
 385         { "evict_l2_eligible",          KSTAT_DATA_UINT64 },
 386         { "evict_l2_ineligible",        KSTAT_DATA_UINT64 },
 387         { "hash_elements",              KSTAT_DATA_UINT64 },
 388         { "hash_elements_max",          KSTAT_DATA_UINT64 },
 389         { "hash_collisions",            KSTAT_DATA_UINT64 },
 390         { "hash_chains",                KSTAT_DATA_UINT64 },
 391         { "hash_chain_max",             KSTAT_DATA_UINT64 },
 392         { "p",                          KSTAT_DATA_UINT64 },
 393         { "c",                          KSTAT_DATA_UINT64 },
 394         { "c_min",                      KSTAT_DATA_UINT64 },
 395         { "c_max",                      KSTAT_DATA_UINT64 },
 396         { "size",                       KSTAT_DATA_UINT64 },
 397         { "hdr_size",                   KSTAT_DATA_UINT64 },
 398         { "data_size",                  KSTAT_DATA_UINT64 },
 399         { "other_size",                 KSTAT_DATA_UINT64 },
 400         { "l2_hits",                    KSTAT_DATA_UINT64 },
 401         { "l2_misses",                  KSTAT_DATA_UINT64 },
 402         { "l2_feeds",                   KSTAT_DATA_UINT64 },
 403         { "l2_rw_clash",                KSTAT_DATA_UINT64 },
 404         { "l2_read_bytes",              KSTAT_DATA_UINT64 },
 405         { "l2_write_bytes",             KSTAT_DATA_UINT64 },
 406         { "l2_writes_sent",             KSTAT_DATA_UINT64 },
 407         { "l2_writes_done",             KSTAT_DATA_UINT64 },
 408         { "l2_writes_error",            KSTAT_DATA_UINT64 },
 409         { "l2_writes_hdr_miss",         KSTAT_DATA_UINT64 },
 410         { "l2_evict_lock_retry",        KSTAT_DATA_UINT64 },
 411         { "l2_evict_reading",           KSTAT_DATA_UINT64 },
 412         { "l2_free_on_write",           KSTAT_DATA_UINT64 },
 413         { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
 414         { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
 415         { "l2_io_error",                KSTAT_DATA_UINT64 },
 416         { "l2_size",                    KSTAT_DATA_UINT64 },
 417         { "l2_asize",                   KSTAT_DATA_UINT64 },
 418         { "l2_hdr_size",                KSTAT_DATA_UINT64 },
 419         { "l2_compress_successes",      KSTAT_DATA_UINT64 },
 420         { "l2_compress_zeros",          KSTAT_DATA_UINT64 },
 421         { "l2_compress_failures",       KSTAT_DATA_UINT64 },
 422         { "memory_throttle_count",      KSTAT_DATA_UINT64 },
 423         { "duplicate_buffers",          KSTAT_DATA_UINT64 },
 424         { "duplicate_buffers_size",     KSTAT_DATA_UINT64 },
 425         { "duplicate_reads",            KSTAT_DATA_UINT64 },
 426         { "arc_meta_used",              KSTAT_DATA_UINT64 },
 427         { "arc_meta_limit",             KSTAT_DATA_UINT64 },
 428         { "arc_meta_max",               KSTAT_DATA_UINT64 }
 429 };
 430 
 431 #define ARCSTAT(stat)   (arc_stats.stat.value.ui64)
 432 
 433 #define ARCSTAT_INCR(stat, val) \
 434         atomic_add_64(&arc_stats.stat.value.ui64, (val))
 435 
 436 #define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
 437 #define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)
 438 
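     /*
      * Lock-free "stat = max(stat, val)": re-read the current value and
      * retry the compare-and-swap until either the stored value is already
      * >= val or our CAS installs val.
      */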
 439 #define ARCSTAT_MAX(stat, val) {                                        \
 440         uint64_t m;                                                     \
 441         while ((val) > (m = arc_stats.stat.value.ui64) &&            \
 442             (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))     \
 443                 continue;                                               \
 444 }
 445 
 446 #define ARCSTAT_MAXSTAT(stat) \
 447         ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
 448 
 449 /*
 450  * We define a macro to allow ARC hits/misses to be easily broken down by
 451  * two separate conditions, giving a total of four different subtypes for
 452  * each of hits and misses (so eight statistics total).
 453  */
 454 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
 455         if (cond1) {                                                    \
 456                 if (cond2) {                                            \
 457                         ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
 458                 } else {                                                \
 459                         ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
 460                 }                                                       \
 461         } else {                                                        \
 462                 if (cond2) {                                            \
 463                         ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
 464                 } else {                                                \
 465                         ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
 466                 }                                                       \
 467         }
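
     /*
      * For instance (an illustrative use, not a definition from this file),
      * a caller tallying a hit might invoke:
      *
      *      ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
      *          demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
      *          data, metadata, hits);
      *
      * which bumps exactly one of arcstat_demand_data_hits,
      * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
      * arcstat_prefetch_metadata_hits.
      */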
 468 
 469 kstat_t                 *arc_ksp;
 470 static arc_state_t      *arc_anon;
 471 static arc_state_t      *arc_mru;
 472 static arc_state_t      *arc_mru_ghost;
 473 static arc_state_t      *arc_mfu;
 474 static arc_state_t      *arc_mfu_ghost;
 475 static arc_state_t      *arc_l2c_only;
 476 
 477 /*
 478  * There are several ARC variables that are critical to export as kstats --
 479  * but we don't want to have to grovel around in the kstat whenever we wish to
 480  * manipulate them.  For these variables, we therefore define them to be in
 481  * terms of the statistic variable.  This assures that we are not introducing
 482  * the possibility of inconsistency by having shadow copies of the variables,
 483  * while still allowing the code to be readable.
 484  */
 485 #define arc_size        ARCSTAT(arcstat_size)   /* actual total arc size */
 486 #define arc_p           ARCSTAT(arcstat_p)      /* target size of MRU */
 487 #define arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
 488 #define arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
 489 #define arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
 490 #define arc_meta_limit  ARCSTAT(arcstat_meta_limit) /* max size for metadata */
 491 #define arc_meta_used   ARCSTAT(arcstat_meta_used) /* size of metadata */
 492 #define arc_meta_max    ARCSTAT(arcstat_meta_max) /* max size of metadata */
 493 
 494 #define L2ARC_IS_VALID_COMPRESS(_c_) \
 495         ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
 496 
 497 static int              arc_no_grow;    /* Don't try to grow cache size */
 498 static uint64_t         arc_tempreserve;
 499 static uint64_t         arc_loaned_bytes;
 500 
 501 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
 502 
 503 typedef struct arc_callback arc_callback_t;
 504 
 505 struct arc_callback {
 506         void                    *acb_private;
 507         arc_done_func_t         *acb_done;
 508         arc_buf_t               *acb_buf;
 509         zio_t                   *acb_zio_dummy;
 510         arc_callback_t          *acb_next;
 511 };
 512 
 513 typedef struct arc_write_callback arc_write_callback_t;
 514 
 515 struct arc_write_callback {
 516         void            *awcb_private;
 517         arc_done_func_t *awcb_ready;
 518         arc_done_func_t *awcb_physdone;
 519         arc_done_func_t *awcb_done;
 520         arc_buf_t       *awcb_buf;
 521 };
 522 
 523 struct arc_buf_hdr {
 524         /* protected by hash lock */
 525         dva_t                   b_dva;
 526         uint64_t                b_birth;
 527         uint64_t                b_cksum0;
 528 
 529         kmutex_t                b_freeze_lock;
 530         zio_cksum_t             *b_freeze_cksum;
 531         void                    *b_thawed;
 532 
 533         arc_buf_hdr_t           *b_hash_next;
 534         arc_buf_t               *b_buf;
 535         uint32_t                b_flags;
 536         uint32_t                b_datacnt;
 537 
 538         arc_callback_t          *b_acb;
 539         kcondvar_t              b_cv;
 540 
 541         /* immutable */
 542         arc_buf_contents_t      b_type;
 543         uint64_t                b_size;
 544         uint64_t                b_spa;
 545 
 546         /* protected by arc state mutex */
 547         arc_state_t             *b_state;
 548         list_node_t             b_arc_node;
 549 
 550         /* updated atomically */
 551         clock_t                 b_arc_access;
 552 
 553         /* self protecting */
 554         refcount_t              b_refcnt;
 555 
 556         l2arc_buf_hdr_t         *b_l2hdr;
 557         list_node_t             b_l2node;
 558 };
 559 
 560 static arc_buf_t *arc_eviction_list;
 561 static kmutex_t arc_eviction_mtx;
 562 static arc_buf_hdr_t arc_eviction_hdr;
 563 static void arc_get_data_buf(arc_buf_t *buf);
 564 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
 565 static int arc_evict_needed(arc_buf_contents_t type);
 566 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
 567 static void arc_buf_watch(arc_buf_t *buf);
 568 
 569 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
 570 
 571 #define GHOST_STATE(state)      \
 572         ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
 573         (state) == arc_l2c_only)
 574 
 575 /*
 576  * Private ARC flags.  These flags are internal to the ARC and show up in
 577  * b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 578  * be passed in as arc_flags in things like arc_read.  However, the private
 579  * flags should never be passed in and should only be set by ARC code.  When
 580  * adding new public flags, make sure not to smash the private ones.
 581  */
 582 
 583 #define ARC_IN_HASH_TABLE       (1 << 9)  /* this buffer is hashed */
 584 #define ARC_IO_IN_PROGRESS      (1 << 10) /* I/O in progress for buf */
 585 #define ARC_IO_ERROR            (1 << 11) /* I/O failed for buf */
 586 #define ARC_FREED_IN_READ       (1 << 12) /* buf freed while in read */
 587 #define ARC_BUF_AVAILABLE       (1 << 13) /* block not in active use */
 588 #define ARC_INDIRECT            (1 << 14) /* this is an indirect block */
 589 #define ARC_FREE_IN_PROGRESS    (1 << 15) /* hdr about to be freed */
 590 #define ARC_L2_WRITING          (1 << 16) /* L2ARC write in progress */
 591 #define ARC_L2_EVICTED          (1 << 17) /* evicted during I/O */
 592 #define ARC_L2_WRITE_HEAD       (1 << 18) /* head of write list */
 593 
 594 #define HDR_IN_HASH_TABLE(hdr)  ((hdr)->b_flags & ARC_IN_HASH_TABLE)
 595 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
 596 #define HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_IO_ERROR)
 597 #define HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_PREFETCH)
 598 #define HDR_FREED_IN_READ(hdr)  ((hdr)->b_flags & ARC_FREED_IN_READ)
 599 #define HDR_BUF_AVAILABLE(hdr)  ((hdr)->b_flags & ARC_BUF_AVAILABLE)
 600 #define HDR_FREE_IN_PROGRESS(hdr)       ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
 601 #define HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_L2CACHE)
 602 #define HDR_L2_READING(hdr)     ((hdr)->b_flags & ARC_IO_IN_PROGRESS &&  \
 603                                     (hdr)->b_l2hdr != NULL)
 604 #define HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_L2_WRITING)
 605 #define HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_L2_EVICTED)
 606 #define HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
 607 
 608 /*
 609  * Other sizes
 610  */
 611 
 612 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
 613 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
 614 
 615 /*
 616  * Hash table routines
 617  */
 618 
 619 #define HT_LOCK_PAD     64
 620 
 621 struct ht_lock {
 622         kmutex_t        ht_lock;
 623 #ifdef _KERNEL
 624         unsigned char   pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
 625 #endif
 626 };
 627 
 628 typedef struct buf_hash_table {
 629         uint64_t        ht_mask;
 630         arc_buf_hdr_t   **ht_table;
 631         struct ht_lock  *ht_locks;
 632         uint64_t        ht_num_locks, ht_lock_mask;
 633 } buf_hash_table_t;
 634 
 635 static buf_hash_table_t buf_hash_table;
 636 
 637 #define BUF_HASH_INDEX(spa, dva, birth) \
 638         (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
 639 #define BUF_HASH_LOCK_NTRY(idx) \
 640         (buf_hash_table.ht_locks[idx & buf_hash_table.ht_lock_mask])
 641 #define BUF_HASH_LOCK(idx)      (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
 642 #define HDR_LOCK(hdr) \
 643         (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
 644 
 645 uint64_t zfs_crc64_table[256];
 646 
 647 /*
 648  * Level 2 ARC
 649  */
 650 
 651 #define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
 652 #define L2ARC_HEADROOM          2                       /* num of writes */
 653 /*
 654  * If, while scanning the ARC, we discover buffers that will be compressed, we
 655  * boost our headroom for the next scanning cycle by this percentage multiple.
 656  */
 657 #define L2ARC_HEADROOM_BOOST    200
 658 #define L2ARC_FEED_SECS         1               /* caching interval secs */
 659 #define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
 660 
 661 #define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
 662 #define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
 663 
 664 /* L2ARC Performance Tunables */
 665 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
 666 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
 667 uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
 668 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
 669 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
 670 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
 671 boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
 672 boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
 673 boolean_t l2arc_norw = B_TRUE;                  /* no reads during writes */
 674 
 675 /*
 676  * L2ARC Internals
 677  */
 678 typedef struct l2arc_dev {
 679         vdev_t                  *l2ad_vdev;     /* vdev */
 680         spa_t                   *l2ad_spa;      /* spa */
 681         uint64_t                l2ad_hand;      /* next write location */
 682         uint64_t                l2ad_start;     /* first addr on device */
 683         uint64_t                l2ad_end;       /* last addr on device */
 684         uint64_t                l2ad_evict;     /* last addr eviction reached */
 685         boolean_t               l2ad_first;     /* first sweep through */
 686         boolean_t               l2ad_writing;   /* currently writing */
 687         list_t                  *l2ad_buflist;  /* buffer list */
 688         list_node_t             l2ad_node;      /* device list node */
 689 } l2arc_dev_t;
 690 
 691 static list_t L2ARC_dev_list;                   /* device list */
 692 static list_t *l2arc_dev_list;                  /* device list pointer */
 693 static kmutex_t l2arc_dev_mtx;                  /* device list mutex */
 694 static l2arc_dev_t *l2arc_dev_last;             /* last device used */
 695 static kmutex_t l2arc_buflist_mtx;              /* mutex for all buflists */
 696 static list_t L2ARC_free_on_write;              /* free after write buf list */
 697 static list_t *l2arc_free_on_write;             /* free after write list ptr */
 698 static kmutex_t l2arc_free_on_write_mtx;        /* mutex for list */
 699 static uint64_t l2arc_ndev;                     /* number of devices */
 700 
 701 typedef struct l2arc_read_callback {
 702         arc_buf_t               *l2rcb_buf;             /* read buffer */
 703         spa_t                   *l2rcb_spa;             /* spa */
 704         blkptr_t                l2rcb_bp;               /* original blkptr */
 705         zbookmark_t             l2rcb_zb;               /* original bookmark */
 706         int                     l2rcb_flags;            /* original flags */
 707         enum zio_compress       l2rcb_compress;         /* applied compress */
 708 } l2arc_read_callback_t;
 709 
 710 typedef struct l2arc_write_callback {
 711         l2arc_dev_t     *l2wcb_dev;             /* device info */
 712         arc_buf_hdr_t   *l2wcb_head;            /* head of write buflist */
 713 } l2arc_write_callback_t;
 714 
 715 struct l2arc_buf_hdr {
 716         /* protected by arc_buf_hdr  mutex */
 717         l2arc_dev_t             *b_dev;         /* L2ARC device */
 718         uint64_t                b_daddr;        /* disk address, offset byte */
 719         /* compression applied to buffer data */
 720         enum zio_compress       b_compress;
 721         /* real alloc'd buffer size depending on b_compress applied */
 722         int                     b_asize;
 723         /* temporary buffer holder for in-flight compressed data */
 724         void                    *b_tmp_cdata;
 725 };
 726 
 727 typedef struct l2arc_data_free {
 728         /* protected by l2arc_free_on_write_mtx */
 729         void            *l2df_data;
 730         size_t          l2df_size;
 731         void            (*l2df_func)(void *, size_t);
 732         list_node_t     l2df_list_node;
 733 } l2arc_data_free_t;
 734 
 735 static kmutex_t l2arc_feed_thr_lock;
 736 static kcondvar_t l2arc_feed_thr_cv;
 737 static uint8_t l2arc_thread_exit;
 738 
 739 static void l2arc_read_done(zio_t *zio);
 740 static void l2arc_hdr_stat_add(void);
 741 static void l2arc_hdr_stat_remove(void);
 742 
 743 static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
 744 static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
 745     enum zio_compress c);
 746 static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
 747 
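     /*
      * Hash the identity of an ARC buffer (spa load guid, DVA and birth TXG)
      * into a 64-bit value by running the DVA bytes through the CRC64 table
      * and folding in the spa and birth values.  BUF_HASH_INDEX() and
      * HDR_LOCK() mask this value down to a hash bucket and a hash lock.
      */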
 748 static inline uint64_t
 749 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
 750 {
 751         uint8_t *vdva = (uint8_t *)dva;
 752         uint64_t crc = -1ULL;
 753         int i;
 754 
 755         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
 756 
 757         for (i = 0; i < sizeof (dva_t); i++)
 758                 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
 759 
 760         crc ^= (spa>>8) ^ birth;
 761 
 762         return (crc);
 763 }
 764 
 765 #define BUF_EMPTY(buf)                                          \
 766         ((buf)->b_dva.dva_word[0] == 0 &&                    \
 767         (buf)->b_dva.dva_word[1] == 0 &&                     \
 768         (buf)->b_birth == 0)
 769 
 770 #define BUF_EQUAL(spa, dva, birth, buf)                         \
 771         ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&       \
 772         ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&       \
 773         ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
 774 
 775 static void
 776 buf_discard_identity(arc_buf_hdr_t *hdr)
 777 {
 778         hdr->b_dva.dva_word[0] = 0;
 779         hdr->b_dva.dva_word[1] = 0;
 780         hdr->b_birth = 0;
 781         hdr->b_cksum0 = 0;
 782 }
 783 
 784 static arc_buf_hdr_t *
 785 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
 786 {
 787         uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 788         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 789         arc_buf_hdr_t *buf;
 790 
 791         mutex_enter(hash_lock);
 792         for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
 793             buf = buf->b_hash_next) {
 794                 if (BUF_EQUAL(spa, dva, birth, buf)) {
 795                         *lockp = hash_lock;
 796                         return (buf);
 797                 }
 798         }
 799         mutex_exit(hash_lock);
 800         *lockp = NULL;
 801         return (NULL);
 802 }
 803 
 804 /*
 805  * Insert an entry into the hash table.  If there is already an element
 806  * equal to the new one in the hash table, then the existing element
 807  * will be returned and the new element will not be inserted.
 808  * Otherwise returns NULL.
 809  */
 810 static arc_buf_hdr_t *
 811 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
 812 {
 813         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 814         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 815         arc_buf_hdr_t *fbuf;
 816         uint32_t i;
 817 
 818         ASSERT(!HDR_IN_HASH_TABLE(buf));
 819         *lockp = hash_lock;
 820         mutex_enter(hash_lock);
 821         for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
 822             fbuf = fbuf->b_hash_next, i++) {
 823                 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
 824                         return (fbuf);
 825         }
 826 
 827         buf->b_hash_next = buf_hash_table.ht_table[idx];
 828         buf_hash_table.ht_table[idx] = buf;
 829         buf->b_flags |= ARC_IN_HASH_TABLE;
 830 
 831         /* collect some hash table performance data */
 832         if (i > 0) {
 833                 ARCSTAT_BUMP(arcstat_hash_collisions);
 834                 if (i == 1)
 835                         ARCSTAT_BUMP(arcstat_hash_chains);
 836 
 837                 ARCSTAT_MAX(arcstat_hash_chain_max, i);
 838         }
 839 
 840         ARCSTAT_BUMP(arcstat_hash_elements);
 841         ARCSTAT_MAXSTAT(arcstat_hash_elements);
 842 
 843         return (NULL);
 844 }
 845 
 846 static void
 847 buf_hash_remove(arc_buf_hdr_t *buf)
 848 {
 849         arc_buf_hdr_t *fbuf, **bufp;
 850         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 851 
 852         ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
 853         ASSERT(HDR_IN_HASH_TABLE(buf));
 854 
 855         bufp = &buf_hash_table.ht_table[idx];
 856         while ((fbuf = *bufp) != buf) {
 857                 ASSERT(fbuf != NULL);
 858                 bufp = &fbuf->b_hash_next;
 859         }
 860         *bufp = buf->b_hash_next;
 861         buf->b_hash_next = NULL;
 862         buf->b_flags &= ~ARC_IN_HASH_TABLE;
 863 
 864         /* collect some hash table performance data */
 865         ARCSTAT_BUMPDOWN(arcstat_hash_elements);
 866 
 867         if (buf_hash_table.ht_table[idx] &&
 868             buf_hash_table.ht_table[idx]->b_hash_next == NULL)
 869                 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
 870 }
 871 
 872 /*
 873  * Global data structures and functions for the buf kmem cache.
 874  */
 875 static kmem_cache_t *hdr_cache;
 876 static kmem_cache_t *buf_cache;
 877 
 878 static void
 879 buf_fini(void)
 880 {
 881         int i;
 882 
 883         kmem_free(buf_hash_table.ht_table,
 884             (buf_hash_table.ht_mask + 1) * sizeof (void *));
 885 
 886         for (i = 0; i < buf_hash_table.ht_num_locks; i++)
 887                 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
 888         kmem_free(buf_hash_table.ht_locks, sizeof (struct ht_lock) *
 889             buf_hash_table.ht_num_locks);
 890         kmem_cache_destroy(hdr_cache);
 891         kmem_cache_destroy(buf_cache);
 892 }
 893 
 894 /*
 895  * Constructor callback - called when the cache is empty
 896  * and a new buf is requested.
 897  */
 898 /* ARGSUSED */
 899 static int
 900 hdr_cons(void *vbuf, void *unused, int kmflag)
 901 {
 902         arc_buf_hdr_t *buf = vbuf;
 903 
 904         bzero(buf, sizeof (arc_buf_hdr_t));
 905         refcount_create(&buf->b_refcnt);
 906         cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
 907         mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 908         arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 909 
 910         return (0);
 911 }
 912 
 913 /* ARGSUSED */
 914 static int
 915 buf_cons(void *vbuf, void *unused, int kmflag)
 916 {
 917         arc_buf_t *buf = vbuf;
 918 
 919         bzero(buf, sizeof (arc_buf_t));
 920         mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
 921         arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 922 
 923         return (0);
 924 }
 925 
 926 /*
 927  * Destructor callback - called when a cached buf is
 928  * no longer required.
 929  */
 930 /* ARGSUSED */
 931 static void
 932 hdr_dest(void *vbuf, void *unused)
 933 {
 934         arc_buf_hdr_t *buf = vbuf;
 935 
 936         ASSERT(BUF_EMPTY(buf));
 937         refcount_destroy(&buf->b_refcnt);
 938         cv_destroy(&buf->b_cv);
 939         mutex_destroy(&buf->b_freeze_lock);
 940         arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 941 }
 942 
 943 /* ARGSUSED */
 944 static void
 945 buf_dest(void *vbuf, void *unused)
 946 {
 947         arc_buf_t *buf = vbuf;
 948 
 949         mutex_destroy(&buf->b_evict_lock);
 950         arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 951 }
 952 
 953 /*
 954  * Reclaim callback -- invoked when memory is low.
 955  */
 956 /* ARGSUSED */
 957 static void
 958 hdr_recl(void *unused)
 959 {
 960         dprintf("hdr_recl called\n");
 961         /*
 962          * umem calls the reclaim func when we destroy the buf cache,
 963          * which is after we do arc_fini().
 964          */
 965         if (!arc_dead)
 966                 cv_signal(&arc_reclaim_thr_cv);
 967 }
 968 
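     /*
      * Size the buffer hash table and its lock array from the amount of
      * physical memory (see the big theory comment at the top of this file),
      * create the header and buffer kmem caches, and fill in the CRC64 table
      * used by buf_hash().
      */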
 969 static void
 970 buf_init(void)
 971 {
 972         uint64_t        *ct;
 973         uint64_t        ht_masklen = 12;
 974         int             i, j;
 975 
 976         while ((1ULL << (ht_masklen + zfs_arc_ht_base_masklen)) <
 977             physmem * PAGESIZE)
 978                 ht_masklen++;
 979         buf_hash_table.ht_mask = (1ULL << ht_masklen) - 1;
 980         buf_hash_table.ht_table =
 981             kmem_zalloc((1ULL << ht_masklen) * sizeof (void *), KM_SLEEP);
 982 
 983         buf_hash_table.ht_num_locks = MAX((physmem * PAGESIZE) >>
 984             zfs_arc_ht_lock_shift, MIN_BUF_LOCKS);
 985         buf_hash_table.ht_lock_mask = buf_hash_table.ht_num_locks - 1;
 986         buf_hash_table.ht_locks = kmem_zalloc(sizeof (struct ht_lock) *
 987             buf_hash_table.ht_num_locks, KM_SLEEP);
 988         for (i = 0; i < buf_hash_table.ht_num_locks; i++) {
 989                 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
 990                     NULL, MUTEX_DEFAULT, NULL);
 991         }
 992 
 993         hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
 994             0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
 995         buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
 996             0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
 997 
 998         for (i = 0; i < 256; i++)
 999                 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1000                         *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1001 }
1002 
1003 #define ARC_MINTIME     (hz>>4) /* 62 ms */
1004 
1005 static void
1006 arc_cksum_verify(arc_buf_t *buf)
1007 {
1008         zio_cksum_t zc;
1009 
1010         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1011                 return;
1012 
1013         mutex_enter(&buf->b_hdr->b_freeze_lock);
1014         if (buf->b_hdr->b_freeze_cksum == NULL ||
1015             (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
1016                 mutex_exit(&buf->b_hdr->b_freeze_lock);
1017                 return;
1018         }
1019         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1020         if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1021                 panic("buffer modified while frozen!");
1022         mutex_exit(&buf->b_hdr->b_freeze_lock);
1023 }
1024 
1025 static int
1026 arc_cksum_equal(arc_buf_t *buf)
1027 {
1028         zio_cksum_t zc;
1029         int equal;
1030 
1031         mutex_enter(&buf->b_hdr->b_freeze_lock);
1032         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1033         equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1034         mutex_exit(&buf->b_hdr->b_freeze_lock);
1035 
1036         return (equal);
1037 }
1038 
1039 static void
1040 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1041 {
1042         if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1043                 return;
1044 
1045         mutex_enter(&buf->b_hdr->b_freeze_lock);
1046         if (buf->b_hdr->b_freeze_cksum != NULL) {
1047                 mutex_exit(&buf->b_hdr->b_freeze_lock);
1048                 return;
1049         }
1050         buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1051         fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1052             buf->b_hdr->b_freeze_cksum);
1053         mutex_exit(&buf->b_hdr->b_freeze_lock);
1054         arc_buf_watch(buf);
1055 }
1056 
1057 #ifndef _KERNEL
1058 typedef struct procctl {
1059         long cmd;
1060         prwatch_t prwatch;
1061 } procctl_t;
1062 #endif
1063 
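     /*
      * In userland, when arc_watch is set, the helpers below write PCWATCH
      * commands to the /proc control file (arc_procfd) to place or clear a
      * watched area over a buffer's data, so that a write to a frozen buffer
      * trips a watchpoint immediately rather than going undetected.
      */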
1064 /* ARGSUSED */
1065 static void
1066 arc_buf_unwatch(arc_buf_t *buf)
1067 {
1068 #ifndef _KERNEL
1069         if (arc_watch) {
1070                 int result;
1071                 procctl_t ctl;
1072                 ctl.cmd = PCWATCH;
1073                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1074                 ctl.prwatch.pr_size = 0;
1075                 ctl.prwatch.pr_wflags = 0;
1076                 result = write(arc_procfd, &ctl, sizeof (ctl));
1077                 ASSERT3U(result, ==, sizeof (ctl));
1078         }
1079 #endif
1080 }
1081 
1082 /* ARGSUSED */
1083 static void
1084 arc_buf_watch(arc_buf_t *buf)
1085 {
1086 #ifndef _KERNEL
1087         if (arc_watch) {
1088                 int result;
1089                 procctl_t ctl;
1090                 ctl.cmd = PCWATCH;
1091                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1092                 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1093                 ctl.prwatch.pr_wflags = WA_WRITE;
1094                 result = write(arc_procfd, &ctl, sizeof (ctl));
1095                 ASSERT3U(result, ==, sizeof (ctl));
1096         }
1097 #endif
1098 }
1099 
1100 void
1101 arc_buf_thaw(arc_buf_t *buf)
1102 {
1103         if (zfs_flags & ZFS_DEBUG_MODIFY) {
1104                 if (buf->b_hdr->b_state != arc_anon)
1105                         panic("modifying non-anon buffer!");
1106                 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1107                         panic("modifying buffer while i/o in progress!");
1108                 arc_cksum_verify(buf);
1109         }
1110 
1111         mutex_enter(&buf->b_hdr->b_freeze_lock);
1112         if (buf->b_hdr->b_freeze_cksum != NULL) {
1113                 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1114                 buf->b_hdr->b_freeze_cksum = NULL;
1115         }
1116 
1117         if (zfs_flags & ZFS_DEBUG_MODIFY) {
1118                 if (buf->b_hdr->b_thawed)
1119                         kmem_free(buf->b_hdr->b_thawed, 1);
1120                 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1121         }
1122 
1123         mutex_exit(&buf->b_hdr->b_freeze_lock);
1124 
1125         arc_buf_unwatch(buf);
1126 }
1127 
1128 void
1129 arc_buf_freeze(arc_buf_t *buf)
1130 {
1131         kmutex_t *hash_lock;
1132 
1133         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1134                 return;
1135 
1136         hash_lock = HDR_LOCK(buf->b_hdr);
1137         mutex_enter(hash_lock);
1138 
1139         ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1140             buf->b_hdr->b_state == arc_anon);
1141         arc_cksum_compute(buf, B_FALSE);
1142         mutex_exit(hash_lock);
1143 
1144 }
1145 
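     /*
      * Take a reference on a buffer header.  If this is the first hold on a
      * buffer in a non-anonymous state, the buffer becomes un-evictable:
      * remove it from its state's list and subtract its size from the
      * state's evictable-size count (arcs_lsize).
      */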
1146 static void
1147 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1148 {
1149         ASSERT(MUTEX_HELD(hash_lock));
1150 
1151         if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1152             (ab->b_state != arc_anon)) {
1153                 uint64_t delta = ab->b_size * ab->b_datacnt;
1154                 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1155                 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1156 
1157                 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1158                 mutex_enter(&ab->b_state->arcs_mtx);
1159                 ASSERT(list_link_active(&ab->b_arc_node));
1160                 list_remove(list, ab);
1161                 if (GHOST_STATE(ab->b_state)) {
1162                         ASSERT0(ab->b_datacnt);
1163                         ASSERT3P(ab->b_buf, ==, NULL);
1164                         delta = ab->b_size;
1165                 }
1166                 ASSERT(delta > 0);
1167                 ASSERT3U(*size, >=, delta);
1168                 atomic_add_64(size, -delta);
1169                 mutex_exit(&ab->b_state->arcs_mtx);
1170                 /* remove the prefetch flag if we get a reference */
1171                 if (ab->b_flags & ARC_PREFETCH)
1172                         ab->b_flags &= ~ARC_PREFETCH;
1173         }
1174 }
1175 
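     /*
      * Drop a reference on a buffer header and return the remaining count.
      * If this was the last hold on a buffer in a non-anonymous state, the
      * buffer becomes evictable again: insert it at the head of its state's
      * list and add its size back to the state's evictable-size count.
      */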
1176 static int
1177 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1178 {
1179         int cnt;
1180         arc_state_t *state = ab->b_state;
1181 
1182         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1183         ASSERT(!GHOST_STATE(state));
1184 
1185         if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1186             (state != arc_anon)) {
1187                 uint64_t *size = &state->arcs_lsize[ab->b_type];
1188 
1189                 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1190                 mutex_enter(&state->arcs_mtx);
1191                 ASSERT(!list_link_active(&ab->b_arc_node));
1192                 list_insert_head(&state->arcs_list[ab->b_type], ab);
1193                 ASSERT(ab->b_datacnt > 0);
1194                 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1195                 mutex_exit(&state->arcs_mtx);
1196         }
1197         return (cnt);
1198 }
1199 
1200 /*
1201  * Move the supplied buffer to the indicated state.  The mutex
1202  * for the buffer must be held by the caller.
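      *
      * If the buffer is evictable (no external holds), it is moved between
      * the evictable lists of the old and new states and the per-state
      * evictable sizes (arcs_lsize) are adjusted; the overall arcs_size of
      * both states is adjusted as well.  Moving a buffer to the anon state
      * also removes it from the hash table.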
1203  */
1204 static void
1205 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1206 {
1207         arc_state_t *old_state = ab->b_state;
1208         int64_t refcnt = refcount_count(&ab->b_refcnt);
1209         uint64_t from_delta, to_delta;
1210 
1211         ASSERT(MUTEX_HELD(hash_lock));
1212         ASSERT3P(new_state, !=, old_state);
1213         ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1214         ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1215         ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1216 
1217         from_delta = to_delta = ab->b_datacnt * ab->b_size;
1218 
1219         /*
1220          * If this buffer is evictable, transfer it from the
1221          * old state list to the new state list.
1222          */
1223         if (refcnt == 0) {
1224                 if (old_state != arc_anon) {
1225                         int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1226                         uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1227 
1228                         if (use_mutex)
1229                                 mutex_enter(&old_state->arcs_mtx);
1230 
1231                         ASSERT(list_link_active(&ab->b_arc_node));
1232                         list_remove(&old_state->arcs_list[ab->b_type], ab);
1233 
1234                         /*
1235                          * If prefetching out of the ghost cache,
1236                          * we will have a non-zero datacnt.
1237                          */
1238                         if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1239                                 /* ghost elements have a ghost size */
1240                                 ASSERT(ab->b_buf == NULL);
1241                                 from_delta = ab->b_size;
1242                         }
1243                         ASSERT3U(*size, >=, from_delta);
1244                         atomic_add_64(size, -from_delta);
1245 
1246                         if (use_mutex)
1247                                 mutex_exit(&old_state->arcs_mtx);
1248                 }
1249                 if (new_state != arc_anon) {
1250                         int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1251                         uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1252 
1253                         if (use_mutex)
1254                                 mutex_enter(&new_state->arcs_mtx);
1255 
1256                         list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1257 
1258                         /* ghost elements have a ghost size */
1259                         if (GHOST_STATE(new_state)) {
1260                                 ASSERT(ab->b_datacnt == 0);
1261                                 ASSERT(ab->b_buf == NULL);
1262                                 to_delta = ab->b_size;
1263                         }
1264                         atomic_add_64(size, to_delta);
1265 
1266                         if (use_mutex)
1267                                 mutex_exit(&new_state->arcs_mtx);
1268                 }
1269         }
1270 
1271         ASSERT(!BUF_EMPTY(ab));
1272         if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1273                 buf_hash_remove(ab);
1274 
1275         /* adjust state sizes */
1276         if (to_delta)
1277                 atomic_add_64(&new_state->arcs_size, to_delta);
1278         if (from_delta) {
1279                 ASSERT3U(old_state->arcs_size, >=, from_delta);
1280                 atomic_add_64(&old_state->arcs_size, -from_delta);
1281         }
1282         ab->b_state = new_state;
1283 
1284         /* adjust l2arc hdr stats */
1285         if (new_state == arc_l2c_only)
1286                 l2arc_hdr_stat_add();
1287         else if (old_state == arc_l2c_only)
1288                 l2arc_hdr_stat_remove();
1289 }
1290 
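     /*
      * Adjust the ARC space accounting for the given category of allocation.
      * The amount is also reflected in arcstat_meta_used and in the overall
      * arc_size.
      */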
1291 void
1292 arc_space_consume(uint64_t space, arc_space_type_t type)
1293 {
1294         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1295 
1296         switch (type) {
1297         case ARC_SPACE_DATA:
1298                 ARCSTAT_INCR(arcstat_data_size, space);
1299                 break;
1300         case ARC_SPACE_OTHER:
1301                 ARCSTAT_INCR(arcstat_other_size, space);
1302                 break;
1303         case ARC_SPACE_HDRS:
1304                 ARCSTAT_INCR(arcstat_hdr_size, space);
1305                 break;
1306         case ARC_SPACE_L2HDRS:
1307                 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1308                 break;
1309         }
1310 
1311         ARCSTAT_INCR(arcstat_meta_used, space);
1312         atomic_add_64(&arc_size, space);
1313 }
1314 
1315 void
1316 arc_space_return(uint64_t space, arc_space_type_t type)
1317 {
1318         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1319 
1320         switch (type) {
1321         case ARC_SPACE_DATA:
1322                 ARCSTAT_INCR(arcstat_data_size, -space);
1323                 break;
1324         case ARC_SPACE_OTHER:
1325                 ARCSTAT_INCR(arcstat_other_size, -space);
1326                 break;
1327         case ARC_SPACE_HDRS:
1328                 ARCSTAT_INCR(arcstat_hdr_size, -space);
1329                 break;
1330         case ARC_SPACE_L2HDRS:
1331                 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1332                 break;
1333         }
1334 
1335         ASSERT(arc_meta_used >= space);
1336         if (arc_meta_max < arc_meta_used)
1337                 arc_meta_max = arc_meta_used;
1338         ARCSTAT_INCR(arcstat_meta_used, -space);
1339         ASSERT(arc_size >= space);
1340         atomic_add_64(&arc_size, -space);
1341 }
1342 
1343 void *
1344 arc_data_buf_alloc(uint64_t size)
1345 {
1346         if (arc_evict_needed(ARC_BUFC_DATA))
1347                 cv_signal(&arc_reclaim_thr_cv);
1348         atomic_add_64(&arc_size, size);
1349         return (zio_data_buf_alloc(size));
1350 }
1351 
1352 void
1353 arc_data_buf_free(void *buf, uint64_t size)
1354 {
1355         zio_data_buf_free(buf, size);
1356         ASSERT(arc_size >= size);
1357         atomic_add_64(&arc_size, -size);
1358 }
1359 
1360 arc_buf_t *
1361 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1362 {
1363         arc_buf_hdr_t *hdr;
1364         arc_buf_t *buf;
1365 
1366         ASSERT3U(size, >, 0);
1367         hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1368         ASSERT(BUF_EMPTY(hdr));
1369         hdr->b_size = size;
1370         hdr->b_type = type;
1371         hdr->b_spa = spa_load_guid(spa);
1372         hdr->b_state = arc_anon;
1373         hdr->b_arc_access = 0;
1374         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1375         buf->b_hdr = hdr;
1376         buf->b_data = NULL;
1377         buf->b_efunc = NULL;
1378         buf->b_private = NULL;
1379         buf->b_next = NULL;
1380         hdr->b_buf = buf;
1381         arc_get_data_buf(buf);
1382         hdr->b_datacnt = 1;
1383         hdr->b_flags = 0;
1384         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1385         (void) refcount_add(&hdr->b_refcnt, tag);
1386 
1387         return (buf);
1388 }
1389 
1390 static char *arc_onloan_tag = "onloan";
1391 
1392 /*
1393  * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1394  * flight data by arc_tempreserve_space() until they are "returned". Loaned
1395  * buffers must be returned to the arc before they can be used by the DMU or
1396  * freed.
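      *
      * In outline: arc_loan_buf() hands the buffer out under arc_onloan_tag,
      * the caller fills it in, and either arc_return_buf() transfers the
      * reference back or arc_loan_inuse_buf() converts an in-use buffer into
      * a loaned one; arc_loaned_bytes tracks the total outstanding.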
1397  */
1398 arc_buf_t *
1399 arc_loan_buf(spa_t *spa, int size)
1400 {
1401         arc_buf_t *buf;
1402 
1403         buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1404 
1405         atomic_add_64(&arc_loaned_bytes, size);
1406         return (buf);
1407 }
1408 
1409 /*
1410  * Return a loaned arc buffer to the arc.
1411  */
1412 void
1413 arc_return_buf(arc_buf_t *buf, void *tag)
1414 {
1415         arc_buf_hdr_t *hdr = buf->b_hdr;
1416 
1417         ASSERT(buf->b_data != NULL);
1418         (void) refcount_add(&hdr->b_refcnt, tag);
1419         (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1420 
1421         atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1422 }
1423 
1424 /* Detach an arc_buf from a dbuf (tag) */
1425 void
1426 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1427 {
1428         arc_buf_hdr_t *hdr;
1429 
1430         ASSERT(buf->b_data != NULL);
1431         hdr = buf->b_hdr;
1432         (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1433         (void) refcount_remove(&hdr->b_refcnt, tag);
1434         buf->b_efunc = NULL;
1435         buf->b_private = NULL;
1436 
1437         atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1438 }
1439 
1440 static arc_buf_t *
1441 arc_buf_clone(arc_buf_t *from)
1442 {
1443         arc_buf_t *buf;
1444         arc_buf_hdr_t *hdr = from->b_hdr;
1445         uint64_t size = hdr->b_size;
1446 
1447         ASSERT(hdr->b_state != arc_anon);
1448 
1449         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1450         buf->b_hdr = hdr;
1451         buf->b_data = NULL;
1452         buf->b_efunc = NULL;
1453         buf->b_private = NULL;
1454         buf->b_next = hdr->b_buf;
1455         hdr->b_buf = buf;
1456         arc_get_data_buf(buf);
1457         bcopy(from->b_data, buf->b_data, size);
1458 
1459         /*
1460          * This buffer already exists in the arc so create a duplicate
1461          * copy for the caller.  If the buffer is associated with user data
1462          * then track the size and number of duplicates.  These stats will be
1463          * updated as duplicate buffers are created and destroyed.
1464          */
1465         if (hdr->b_type == ARC_BUFC_DATA) {
1466                 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1467                 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1468         }
1469         hdr->b_datacnt += 1;
1470         return (buf);
1471 }
1472 
1473 void
1474 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1475 {
1476         arc_buf_hdr_t *hdr;
1477         kmutex_t *hash_lock;
1478 
1479         /*
1480          * Check to see if this buffer is evicted.  Callers
1481          * must verify b_data != NULL to know if the add_ref
1482          * was successful.
1483          */
1484         mutex_enter(&buf->b_evict_lock);
1485         if (buf->b_data == NULL) {
1486                 mutex_exit(&buf->b_evict_lock);
1487                 return;
1488         }
1489         hash_lock = HDR_LOCK(buf->b_hdr);
1490         mutex_enter(hash_lock);
1491         hdr = buf->b_hdr;
1492         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1493         mutex_exit(&buf->b_evict_lock);
1494 
1495         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1496         add_reference(hdr, hash_lock, tag);
1497         DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1498         arc_access(hdr, hash_lock);
1499         mutex_exit(hash_lock);
1500         ARCSTAT_BUMP(arcstat_hits);
1501         ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1502             demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1503             data, metadata, hits);
1504 }
1505 
1506 /*
1507  * Free the arc data buffer.  If it is an l2arc write in progress,
1508  * the buffer is placed on l2arc_free_on_write to be freed later.
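      * The free_func passed in is zio_buf_free() or zio_data_buf_free(),
      * matching how the data block was originally allocated.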
1509  */
1510 static void
1511 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1512 {
1513         arc_buf_hdr_t *hdr = buf->b_hdr;
1514 
1515         if (HDR_L2_WRITING(hdr)) {
1516                 l2arc_data_free_t *df;
1517                 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1518                 df->l2df_data = buf->b_data;
1519                 df->l2df_size = hdr->b_size;
1520                 df->l2df_func = free_func;
1521                 mutex_enter(&l2arc_free_on_write_mtx);
1522                 list_insert_head(l2arc_free_on_write, df);
1523                 mutex_exit(&l2arc_free_on_write_mtx);
1524                 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1525         } else {
1526                 free_func(buf->b_data, hdr->b_size);
1527         }
1528 }
1529 
1530 static void
1531 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1532 {
1533         arc_buf_t **bufp;
1534 
1535         /* free up data associated with the buf */
1536         if (buf->b_data) {
1537                 arc_state_t *state = buf->b_hdr->b_state;
1538                 uint64_t size = buf->b_hdr->b_size;
1539                 arc_buf_contents_t type = buf->b_hdr->b_type;
1540 
1541                 arc_cksum_verify(buf);
1542                 arc_buf_unwatch(buf);
1543 
1544                 if (!recycle) {
1545                         if (type == ARC_BUFC_METADATA) {
1546                                 arc_buf_data_free(buf, zio_buf_free);
1547                                 arc_space_return(size, ARC_SPACE_DATA);
1548                         } else {
1549                                 ASSERT(type == ARC_BUFC_DATA);
1550                                 arc_buf_data_free(buf, zio_data_buf_free);
1551                                 ARCSTAT_INCR(arcstat_data_size, -size);
1552                                 atomic_add_64(&arc_size, -size);
1553                         }
1554                 }
1555                 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1556                         uint64_t *cnt = &state->arcs_lsize[type];
1557 
1558                         ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1559                         ASSERT(state != arc_anon);
1560 
1561                         ASSERT3U(*cnt, >=, size);
1562                         atomic_add_64(cnt, -size);
1563                 }
1564                 ASSERT3U(state->arcs_size, >=, size);
1565                 atomic_add_64(&state->arcs_size, -size);
1566                 buf->b_data = NULL;
1567 
1568                 /*
1569                  * If we're destroying a duplicate buffer, make sure
1570                  * that the appropriate statistics are updated.
1571                  */
1572                 if (buf->b_hdr->b_datacnt > 1 &&
1573                     buf->b_hdr->b_type == ARC_BUFC_DATA) {
1574                         ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1575                         ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1576                 }
1577                 ASSERT(buf->b_hdr->b_datacnt > 0);
1578                 buf->b_hdr->b_datacnt -= 1;
1579         }
1580 
1581         /* only remove the buf if requested */
1582         if (!all)
1583                 return;
1584 
1585         /* remove the buf from the hdr list */
1586         for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1587                 continue;
1588         *bufp = buf->b_next;
1589         buf->b_next = NULL;
1590 
1591         ASSERT(buf->b_efunc == NULL);
1592 
1593         /* clean up the buf */
1594         buf->b_hdr = NULL;
1595         kmem_cache_free(buf_cache, buf);
1596 }
1597 
1598 static void
1599 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1600 {
1601         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1602         ASSERT3P(hdr->b_state, ==, arc_anon);
1603         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1604         l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1605 
1606         if (l2hdr != NULL) {
1607                 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1608                 /*
1609                  * To prevent arc_free() and l2arc_evict() from
1610                  * attempting to free the same buffer at the same time,
1611                  * a FREE_IN_PROGRESS flag is given to arc_free() to
1612                  * give it priority.  l2arc_evict() can't destroy this
1613                  * header while we are waiting on l2arc_buflist_mtx.
1614                  *
1615                  * The hdr may be removed from l2ad_buflist before we
1616                  * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1617                  */
1618                 if (!buflist_held) {
1619                         mutex_enter(&l2arc_buflist_mtx);
1620                         l2hdr = hdr->b_l2hdr;
1621                 }
1622 
1623                 if (l2hdr != NULL) {
1624                         list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1625                         ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1626                         ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1627                         kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1628                         if (hdr->b_state == arc_l2c_only)
1629                                 l2arc_hdr_stat_remove();
1630                         hdr->b_l2hdr = NULL;
1631                 }
1632 
1633                 if (!buflist_held)
1634                         mutex_exit(&l2arc_buflist_mtx);
1635         }
1636 
1637         if (!BUF_EMPTY(hdr)) {
1638                 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1639                 buf_discard_identity(hdr);
1640         }
1641         while (hdr->b_buf) {
1642                 arc_buf_t *buf = hdr->b_buf;
1643 
1644                 if (buf->b_efunc) {
1645                         mutex_enter(&arc_eviction_mtx);
1646                         mutex_enter(&buf->b_evict_lock);
1647                         ASSERT(buf->b_hdr != NULL);
1648                         arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1649                         hdr->b_buf = buf->b_next;
1650                         buf->b_hdr = &arc_eviction_hdr;
1651                         buf->b_next = arc_eviction_list;
1652                         arc_eviction_list = buf;
1653                         mutex_exit(&buf->b_evict_lock);
1654                         mutex_exit(&arc_eviction_mtx);
1655                 } else {
1656                         arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1657                 }
1658         }
1659         if (hdr->b_freeze_cksum != NULL) {
1660                 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1661                 hdr->b_freeze_cksum = NULL;
1662         }
1663         if (hdr->b_thawed) {
1664                 kmem_free(hdr->b_thawed, 1);
1665                 hdr->b_thawed = NULL;
1666         }
1667 
1668         ASSERT(!list_link_active(&hdr->b_arc_node));
1669         ASSERT3P(hdr->b_hash_next, ==, NULL);
1670         ASSERT3P(hdr->b_acb, ==, NULL);
1671         kmem_cache_free(hdr_cache, hdr);
1672 }
1673 
1674 void
1675 arc_buf_free(arc_buf_t *buf, void *tag)
1676 {
1677         arc_buf_hdr_t *hdr = buf->b_hdr;
1678         int hashed = hdr->b_state != arc_anon;
1679 
1680         ASSERT(buf->b_efunc == NULL);
1681         ASSERT(buf->b_data != NULL);
1682 
1683         if (hashed) {
1684                 kmutex_t *hash_lock = HDR_LOCK(hdr);
1685 
1686                 mutex_enter(hash_lock);
1687                 hdr = buf->b_hdr;
1688                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1689 
1690                 (void) remove_reference(hdr, hash_lock, tag);
1691                 if (hdr->b_datacnt > 1) {
1692                         arc_buf_destroy(buf, FALSE, TRUE);
1693                 } else {
1694                         ASSERT(buf == hdr->b_buf);
1695                         ASSERT(buf->b_efunc == NULL);
1696                         hdr->b_flags |= ARC_BUF_AVAILABLE;
1697                 }
1698                 mutex_exit(hash_lock);
1699         } else if (HDR_IO_IN_PROGRESS(hdr)) {
1700                 int destroy_hdr;
1701                 /*
1702                  * We are in the middle of an async write.  Don't destroy
1703                  * this buffer unless the write completes before we finish
1704                  * decrementing the reference count.
1705                  */
1706                 mutex_enter(&arc_eviction_mtx);
1707                 (void) remove_reference(hdr, NULL, tag);
1708                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1709                 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1710                 mutex_exit(&arc_eviction_mtx);
1711                 if (destroy_hdr)
1712                         arc_hdr_destroy(hdr);
1713         } else {
1714                 if (remove_reference(hdr, NULL, tag) > 0)
1715                         arc_buf_destroy(buf, FALSE, TRUE);
1716                 else
1717                         arc_hdr_destroy(hdr);
1718         }
1719 }
1720 
1721 boolean_t
1722 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1723 {
1724         arc_buf_hdr_t *hdr = buf->b_hdr;
1725         kmutex_t *hash_lock = HDR_LOCK(hdr);
1726         boolean_t no_callback = (buf->b_efunc == NULL);
1727 
1728         if (hdr->b_state == arc_anon) {
1729                 ASSERT(hdr->b_datacnt == 1);
1730                 arc_buf_free(buf, tag);
1731                 return (no_callback);
1732         }
1733 
1734         mutex_enter(hash_lock);
1735         hdr = buf->b_hdr;
1736         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1737         ASSERT(hdr->b_state != arc_anon);
1738         ASSERT(buf->b_data != NULL);
1739 
1740         (void) remove_reference(hdr, hash_lock, tag);
1741         if (hdr->b_datacnt > 1) {
1742                 if (no_callback)
1743                         arc_buf_destroy(buf, FALSE, TRUE);
1744         } else if (no_callback) {
1745                 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1746                 ASSERT(buf->b_efunc == NULL);
1747                 hdr->b_flags |= ARC_BUF_AVAILABLE;
1748         }
1749         ASSERT(no_callback || hdr->b_datacnt > 1 ||
1750             refcount_is_zero(&hdr->b_refcnt));
1751         mutex_exit(hash_lock);
1752         return (no_callback);
1753 }
1754 
1755 int
1756 arc_buf_size(arc_buf_t *buf)
1757 {
1758         return (buf->b_hdr->b_size);
1759 }
1760 
1761 /*
1762  * Called from the DMU to determine if the current buffer should be
1763  * evicted. In order to ensure proper locking, the eviction must be initiated
1764  * from the DMU. Return true if the buffer is associated with user data and
1765  * duplicate buffers still exist.
1766  */
1767 boolean_t
1768 arc_buf_eviction_needed(arc_buf_t *buf)
1769 {
1770         arc_buf_hdr_t *hdr;
1771         boolean_t evict_needed = B_FALSE;
1772 
1773         if (zfs_disable_dup_eviction)
1774                 return (B_FALSE);
1775 
1776         mutex_enter(&buf->b_evict_lock);
1777         hdr = buf->b_hdr;
1778         if (hdr == NULL) {
1779                 /*
1780                  * We are in arc_do_user_evicts(); let that function
1781                  * perform the eviction.
1782                  */
1783                 ASSERT(buf->b_data == NULL);
1784                 mutex_exit(&buf->b_evict_lock);
1785                 return (B_FALSE);
1786         } else if (buf->b_data == NULL) {
1787                 /*
1788                  * We have already been added to the arc eviction list;
1789                  * recommend eviction.
1790                  */
1791                 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1792                 mutex_exit(&buf->b_evict_lock);
1793                 return (B_TRUE);
1794         }
1795 
1796         if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1797                 evict_needed = B_TRUE;
1798 
1799         mutex_exit(&buf->b_evict_lock);
1800         return (evict_needed);
1801 }
1802 
1803 /*
1804  * Evict buffers from the list until we've removed the specified number of
1805  * bytes.  Move the removed buffers to the appropriate evict state.
1806  * If the recycle flag is set, then attempt to "recycle" a buffer:
1807  * - look for a buffer to evict that is `bytes' long.
1808  * - return the data block from this buffer rather than freeing it.
1809  * This flag is used by callers that are trying to make space for a
1810  * new buffer in a full arc cache.
1811  *
1812  * This function makes a "best effort".  It skips over any buffers
1813  * it can't get a hash_lock on, and so may not catch all candidates.
1814  * It may also return without evicting as much space as requested.
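      *
      * For example, arc_get_data_buf() calls this with recycle set and
      * `bytes' equal to the size of the buffer it is filling, so that an
      * evicted block of exactly that size can be reused as the new b_data
      * instead of being freed and reallocated.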
1815  */
1816 static void *
1817 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1818     arc_buf_contents_t type)
1819 {
1820         arc_state_t *evicted_state;
1821         uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1822         arc_buf_hdr_t *ab, *ab_prev = NULL;
1823         list_t *list = &state->arcs_list[type];
1824         kmutex_t *hash_lock;
1825         boolean_t have_lock;
1826         void *stolen = NULL;
1827         arc_buf_hdr_t marker = { 0 };
1828         int count = 0;
1829 
1830         ASSERT(state == arc_mru || state == arc_mfu);
1831 
1832         evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1833 
1834         mutex_enter(&state->arcs_mtx);
1835         mutex_enter(&evicted_state->arcs_mtx);
1836 
1837         for (ab = list_tail(list); ab; ab = ab_prev) {
1838                 ab_prev = list_prev(list, ab);
1839                 /* prefetch buffers have a minimum lifespan */
1840                 if (HDR_IO_IN_PROGRESS(ab) ||
1841                     (spa && ab->b_spa != spa) ||
1842                     (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1843                     ddi_get_lbolt() - ab->b_arc_access <
1844                     arc_min_prefetch_lifespan)) {
1845                         skipped++;
1846                         continue;
1847                 }
1848                 /* "lookahead" for better eviction candidate */
1849                 if (recycle && ab->b_size != bytes &&
1850                     ab_prev && ab_prev->b_size == bytes)
1851                         continue;
1852 
1853                 /* ignore markers */
1854                 if (ab->b_spa == 0)
1855                         continue;
1856 
1857                 /*
1858                  * It may take a long time to evict all the bufs requested.
1859                  * To avoid blocking all arc activity, periodically drop
1860                  * the arcs_mtx and give other threads a chance to run
1861                  * before reacquiring the lock.
1862                  *
1863                  * If we are looking for a buffer to recycle, we are in
1864                  * the hot code path, so don't sleep.
1865                  */
1866                 if (!recycle && count++ > arc_evict_iterations) {
1867                         list_insert_after(list, ab, &marker);
1868                         mutex_exit(&evicted_state->arcs_mtx);
1869                         mutex_exit(&state->arcs_mtx);
1870                         kpreempt(KPREEMPT_SYNC);
1871                         mutex_enter(&state->arcs_mtx);
1872                         mutex_enter(&evicted_state->arcs_mtx);
1873                         ab_prev = list_prev(list, &marker);
1874                         list_remove(list, &marker);
1875                         count = 0;
1876                         continue;
1877                 }
1878 
1879                 hash_lock = HDR_LOCK(ab);
1880                 have_lock = MUTEX_HELD(hash_lock);
1881                 if (have_lock || mutex_tryenter(hash_lock)) {
1882                         ASSERT0(refcount_count(&ab->b_refcnt));
1883                         ASSERT(ab->b_datacnt > 0);
1884                         while (ab->b_buf) {
1885                                 arc_buf_t *buf = ab->b_buf;
1886                                 if (!mutex_tryenter(&buf->b_evict_lock)) {
1887                                         missed += 1;
1888                                         break;
1889                                 }
1890                                 if (buf->b_data) {
1891                                         bytes_evicted += ab->b_size;
1892                                         if (recycle && ab->b_type == type &&
1893                                             ab->b_size == bytes &&
1894                                             !HDR_L2_WRITING(ab)) {
1895                                                 stolen = buf->b_data;
1896                                                 recycle = FALSE;
1897                                         }
1898                                 }
1899                                 if (buf->b_efunc) {
1900                                         mutex_enter(&arc_eviction_mtx);
1901                                         arc_buf_destroy(buf,
1902                                             buf->b_data == stolen, FALSE);
1903                                         ab->b_buf = buf->b_next;
1904                                         buf->b_hdr = &arc_eviction_hdr;
1905                                         buf->b_next = arc_eviction_list;
1906                                         arc_eviction_list = buf;
1907                                         mutex_exit(&arc_eviction_mtx);
1908                                         mutex_exit(&buf->b_evict_lock);
1909                                 } else {
1910                                         mutex_exit(&buf->b_evict_lock);
1911                                         arc_buf_destroy(buf,
1912                                             buf->b_data == stolen, TRUE);
1913                                 }
1914                         }
1915 
1916                         if (ab->b_l2hdr) {
1917                                 ARCSTAT_INCR(arcstat_evict_l2_cached,
1918                                     ab->b_size);
1919                         } else {
1920                                 if (l2arc_write_eligible(ab->b_spa, ab)) {
1921                                         ARCSTAT_INCR(arcstat_evict_l2_eligible,
1922                                             ab->b_size);
1923                                 } else {
1924                                         ARCSTAT_INCR(
1925                                             arcstat_evict_l2_ineligible,
1926                                             ab->b_size);
1927                                 }
1928                         }
1929 
1930                         if (ab->b_datacnt == 0) {
1931                                 arc_change_state(evicted_state, ab, hash_lock);
1932                                 ASSERT(HDR_IN_HASH_TABLE(ab));
1933                                 ab->b_flags |= ARC_IN_HASH_TABLE;
1934                                 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1935                                 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1936                         }
1937                         if (!have_lock)
1938                                 mutex_exit(hash_lock);
1939                         if (bytes >= 0 && bytes_evicted >= bytes)
1940                                 break;
1941                 } else {
1942                         missed += 1;
1943                 }
1944         }
1945 
1946         mutex_exit(&evicted_state->arcs_mtx);
1947         mutex_exit(&state->arcs_mtx);
1948 
1949         if (bytes_evicted < bytes)
1950                 dprintf("only evicted %lld bytes from %x",
1951                     (longlong_t)bytes_evicted, state);
1952 
1953         if (skipped)
1954                 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1955 
1956         if (missed)
1957                 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1958 
1959         /*
1960          * Note: we have just evicted some data into the ghost state,
1961          * potentially putting the ghost size over the desired size.  Rather
1962          * than evicting from the ghost list in this hot code path, leave
1963          * this chore to the arc_reclaim_thread().
1964          */
1965 
1966         return (stolen);
1967 }
1968 
1969 /*
1970  * Remove buffers from the list until we've removed the specified number of
1971  * bytes.  Destroy the buffers that are removed.
1972  */
1973 static void
1974 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1975 {
1976         arc_buf_hdr_t *ab, *ab_prev;
1977         arc_buf_hdr_t marker = { 0 };
1978         list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1979         kmutex_t *hash_lock;
1980         uint64_t bytes_deleted = 0;
1981         uint64_t bufs_skipped = 0;
1982         int count = 0;
1983 
1984         ASSERT(GHOST_STATE(state));
1985 top:
1986         mutex_enter(&state->arcs_mtx);
1987         for (ab = list_tail(list); ab; ab = ab_prev) {
1988                 ab_prev = list_prev(list, ab);
1989                 if (ab->b_type > ARC_BUFC_NUMTYPES)
1990                         panic("invalid ab=%p", (void *)ab);
1991                 if (spa && ab->b_spa != spa)
1992                         continue;
1993 
1994                 /* ignore markers */
1995                 if (ab->b_spa == 0)
1996                         continue;
1997 
1998                 hash_lock = HDR_LOCK(ab);
1999                 /* caller may be trying to modify this buffer, skip it */
2000                 if (MUTEX_HELD(hash_lock))
2001                         continue;
2002 
2003                 /*
2004                  * It may take a long time to evict all the bufs requested.
2005                  * To avoid blocking all arc activity, periodically drop
2006                  * the arcs_mtx and give other threads a chance to run
2007                  * before reacquiring the lock.
2008                  */
2009                 if (count++ > arc_evict_iterations) {
2010                         list_insert_after(list, ab, &marker);
2011                         mutex_exit(&state->arcs_mtx);
2012                         kpreempt(KPREEMPT_SYNC);
2013                         mutex_enter(&state->arcs_mtx);
2014                         ab_prev = list_prev(list, &marker);
2015                         list_remove(list, &marker);
2016                         count = 0;
2017                         continue;
2018                 }
2019                 if (mutex_tryenter(hash_lock)) {
2020                         ASSERT(!HDR_IO_IN_PROGRESS(ab));
2021                         ASSERT(ab->b_buf == NULL);
2022                         ARCSTAT_BUMP(arcstat_deleted);
2023                         bytes_deleted += ab->b_size;
2024 
2025                         if (ab->b_l2hdr != NULL) {
2026                                 /*
2027                                  * This buffer is cached on the 2nd Level ARC;
2028                                  * don't destroy the header.
2029                                  */
2030                                 arc_change_state(arc_l2c_only, ab, hash_lock);
2031                                 mutex_exit(hash_lock);
2032                         } else {
2033                                 arc_change_state(arc_anon, ab, hash_lock);
2034                                 mutex_exit(hash_lock);
2035                                 arc_hdr_destroy(ab);
2036                         }
2037 
2038                         DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2039                         if (bytes >= 0 && bytes_deleted >= bytes)
2040                                 break;
2041                 } else if (bytes < 0) {
2042                         /*
2043                          * Insert a list marker and then wait for the
2044                          * hash lock to become available. Once it's
2045                          * available, restart from where we left off.
2046                          */
2047                         list_insert_after(list, ab, &marker);
2048                         mutex_exit(&state->arcs_mtx);
2049                         mutex_enter(hash_lock);
2050                         mutex_exit(hash_lock);
2051                         mutex_enter(&state->arcs_mtx);
2052                         ab_prev = list_prev(list, &marker);
2053                         list_remove(list, &marker);
2054                 } else {
2055                         bufs_skipped += 1;
2056                 }
2057 
2058         }
2059         mutex_exit(&state->arcs_mtx);
2060 
2061         if (list == &state->arcs_list[ARC_BUFC_DATA] &&
2062             (bytes < 0 || bytes_deleted < bytes)) {
2063                 list = &state->arcs_list[ARC_BUFC_METADATA];
2064                 goto top;
2065         }
2066 
2067         if (bufs_skipped) {
2068                 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2069                 ASSERT(bytes >= 0);
2070         }
2071 
2072         if (bytes_deleted < bytes)
2073                 dprintf("only deleted %lld bytes from %p",
2074                     (longlong_t)bytes_deleted, state);
2075 }
2076 
2077 static void
2078 arc_adjust(void)
2079 {
2080         int64_t adjustment, delta;
2081 
2082         /*
2083          * Adjust MRU size
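              * The MRU side is over target when anon + MRU (+ metadata in use)
              * exceed arc_p; evict at most the overall overage (arc_size - arc_c).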
2084          */
2085 
2086         adjustment = MIN((int64_t)(arc_size - arc_c),
2087             (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2088             arc_p));
2089 
2090         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2091                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2092                 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
2093                 adjustment -= delta;
2094         }
2095 
2096         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2097                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2098                 (void) arc_evict(arc_mru, NULL, delta, FALSE,
2099                     ARC_BUFC_METADATA);
2100         }
2101 
2102         /*
2103          * Adjust MFU size
2104          */
2105 
2106         adjustment = arc_size - arc_c;
2107 
2108         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2109                 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2110                 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
2111                 adjustment -= delta;
2112         }
2113 
2114         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2115                 int64_t delta = MIN(adjustment,
2116                     arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2117                 (void) arc_evict(arc_mfu, NULL, delta, FALSE,
2118                     ARC_BUFC_METADATA);
2119         }
2120 
2121         /*
2122          * Adjust ghost lists
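              * Keep mru + mru_ghost within arc_c, and the combined ghost lists
              * within arc_c as well, so the amount of ghost history is bounded.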
2123          */
2124 
2125         adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2126 
2127         if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2128                 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2129                 arc_evict_ghost(arc_mru_ghost, NULL, delta);
2130         }
2131 
2132         adjustment =
2133             arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2134 
2135         if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2136                 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2137                 arc_evict_ghost(arc_mfu_ghost, NULL, delta);
2138         }
2139 }
2140 
2141 static void
2142 arc_do_user_evicts(void)
2143 {
2144         mutex_enter(&arc_eviction_mtx);
2145         while (arc_eviction_list != NULL) {
2146                 arc_buf_t *buf = arc_eviction_list;
2147                 arc_eviction_list = buf->b_next;
2148                 mutex_enter(&buf->b_evict_lock);
2149                 buf->b_hdr = NULL;
2150                 mutex_exit(&buf->b_evict_lock);
2151                 mutex_exit(&arc_eviction_mtx);
2152 
2153                 if (buf->b_efunc != NULL)
2154                         VERIFY(buf->b_efunc(buf) == 0);
2155 
2156                 buf->b_efunc = NULL;
2157                 buf->b_private = NULL;
2158                 kmem_cache_free(buf_cache, buf);
2159                 mutex_enter(&arc_eviction_mtx);
2160         }
2161         mutex_exit(&arc_eviction_mtx);
2162 }
2163 
2164 /*
2165  * Flush all *evictable* data from the cache for the given spa.
2166  * NOTE: this will not touch "active" (i.e. referenced) data.
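      * When spa == NULL, evictable data for every pool is flushed and each
      * list is drained completely; with a specific spa, each list gets a
      * single eviction pass for that pool's guid.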
2167  */
2168 void
2169 arc_flush(spa_t *spa)
2170 {
2171         uint64_t guid = 0;
2172 
2173         if (spa)
2174                 guid = spa_load_guid(spa);
2175 
2176         while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
2177                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2178                 if (spa)
2179                         break;
2180         }
2181         while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
2182                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2183                 if (spa)
2184                         break;
2185         }
2186         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
2187                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2188                 if (spa)
2189                         break;
2190         }
2191         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
2192                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2193                 if (spa)
2194                         break;
2195         }
2196 
2197         arc_evict_ghost(arc_mru_ghost, guid, -1);
2198         arc_evict_ghost(arc_mfu_ghost, guid, -1);
2199 
2200         mutex_enter(&arc_reclaim_thr_lock);
2201         arc_do_user_evicts();
2202         mutex_exit(&arc_reclaim_thr_lock);
2203         ASSERT(spa || arc_eviction_list == NULL);
2204 }
2205 
2206 void
2207 arc_shrink(void)
2208 {
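             /*
              * Shrink the target size arc_c by arc_c >> arc_shrink_shift (or by
              * the pages the kernel says it needs, if that is more), floored at
              * arc_c_min, scale arc_p down with it, and then evict any overage
              * via arc_adjust().
              */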
2209         if (arc_c > arc_c_min) {
2210                 uint64_t to_free;
2211 
2212 #ifdef _KERNEL
2213                 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
2214 #else
2215                 to_free = arc_c >> arc_shrink_shift;
2216 #endif
2217                 if (arc_c > arc_c_min + to_free)
2218                         atomic_add_64(&arc_c, -to_free);
2219                 else
2220                         arc_c = arc_c_min;
2221 
2222                 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2223                 if (arc_c > arc_size)
2224                         arc_c = MAX(arc_size, arc_c_min);
2225                 if (arc_p > arc_c)
2226                         arc_p = (arc_c >> 1);
2227                 ASSERT(arc_c >= arc_c_min);
2228                 ASSERT((int64_t)arc_p >= 0);
2229         }
2230 
2231         if (arc_size > arc_c)
2232                 arc_adjust();
2233 }
2234 
2235 /*
2236  * Determine if the system is under memory pressure and is asking
2237  * to reclaim memory. A return value of 1 indicates that the system
2238  * is under memory pressure and that the arc should adjust accordingly.
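      *
      * In the userland build the kernel checks below are compiled out and
      * memory pressure is instead simulated by returning 1 for roughly one
      * call in a hundred.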
2239  */
2240 static int
2241 arc_reclaim_needed(void)
2242 {
2243         uint64_t extra;
2244 
2245 #ifdef _KERNEL
2246 
2247         if (needfree)
2248                 return (1);
2249 
2250         /*
2251          * take 'desfree' extra pages, so we reclaim sooner, rather than later
2252          */
2253         extra = desfree;
2254 
2255         /*
2256          * check that we're out of range of the pageout scanner.  It starts to
2257          * schedule paging if freemem is less than lotsfree plus needfree.
2258          * lotsfree is the high-water mark for pageout, and needfree is the
2259          * number of needed free pages.  We add extra pages here to make sure
2260          * the scanner doesn't start up while we're freeing memory.
2261          */
2262         if (freemem < lotsfree + needfree + extra)
2263                 return (1);
2264 
2265         /*
2266          * check to make sure that swapfs has enough space so that anon
2267          * reservations can still succeed. anon_resvmem() requires that
2268          * availrmem remain greater than swapfs_minfree plus the number of
2269          * reserved swap pages.  We also add a bit of extra here just to prevent
2270          * circumstances from getting really dire.
2271          */
2272         if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2273                 return (1);
2274 
2275         /*
2276          * Check that we have enough availrmem that memory locking (e.g., via
2277          * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
2278          * stores the number of pages that cannot be locked; when availrmem
2279          * drops below pages_pp_maximum, page locking mechanisms such as
2280          * page_pp_lock() will fail.)
2281          */
2282         if (availrmem <= pages_pp_maximum)
2283                 return (1);
2284 
2285 #if defined(__i386)
2286         /*
2287          * If we're on an i386 platform, it's possible that we'll exhaust the
2288          * kernel heap space before we ever run out of available physical
2289          * memory.  Most checks of the size of the heap_area compare against
2290          * tune.t_minarmem, which is the minimum available real memory that we
2291          * can have in the system.  However, this is generally fixed at 25 pages
2292          * which is so low that it's useless.  In this comparison we instead
2293          * look at the total heap size and reclaim if more than 3/4 of the
2294          * heap is allocated (or, equivalently, if less than 1/4 of the heap
2295          * is free).
2296          */
2297         if (vmem_size(heap_arena, VMEM_FREE) <
2298             (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2299                 return (1);
2300 #endif
2301 
2302         /*
2303          * If zio data pages are being allocated out of a separate heap segment,
2304          * then enforce that the amount of free vmem in that arena remains
2305          * above roughly 1/16th of its allocated size.
2306          *
2307          * Note: The 1/16th arena free requirement was put in place
2308          * to aggressively evict memory from the arc in order to avoid
2309          * memory fragmentation issues.
2310          */
2311         if (zio_arena != NULL &&
2312             vmem_size(zio_arena, VMEM_FREE) <
2313             (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2314                 return (1);
2315 #else
2316         if (spa_get_random(100) == 0)
2317                 return (1);
2318 #endif
2319         return (0);
2320 }
2321 
2322 static void
2323 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2324 {
2325         size_t                  i;
2326         kmem_cache_t            *prev_cache = NULL;
2327         kmem_cache_t            *prev_data_cache = NULL;
2328         extern kmem_cache_t     *zio_buf_cache[];
2329         extern kmem_cache_t     *zio_data_buf_cache[];
2330 
2331 #ifdef _KERNEL
2332         if (arc_meta_used >= arc_meta_limit) {
2333                 /*
2334                  * We are exceeding our meta-data cache limit.
2335                  * Purge some DNLC entries to release holds on meta-data.
2336                  */
2337                 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2338         }
2339 #if defined(__i386)
2340         /*
2341          * Reclaim unused memory from all kmem caches.
2342          */
2343         kmem_reap();
2344 #endif
2345 #endif
2346 
2347         /*
2348          * An aggressive reclamation will shrink the cache size as well as
2349          * reap free buffers from the arc kmem caches.
2350          */
2351         if (strat == ARC_RECLAIM_AGGR)
2352                 arc_shrink();
2353 
2354         for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2355                 if (zio_buf_cache[i] != prev_cache) {
2356                         prev_cache = zio_buf_cache[i];
2357                         kmem_cache_reap_now(zio_buf_cache[i]);
2358                 }
2359                 if (zio_data_buf_cache[i] != prev_data_cache) {
2360                         prev_data_cache = zio_data_buf_cache[i];
2361                         kmem_cache_reap_now(zio_data_buf_cache[i]);
2362                 }
2363         }
2364         kmem_cache_reap_now(buf_cache);
2365         kmem_cache_reap_now(hdr_cache);
2366 
2367         /*
2368          * Ask the vmem arena to reclaim unused memory from its
2369          * quantum caches.
2370          */
2371         if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2372                 vmem_qcache_reap(zio_arena);
2373 }
2374 
2375 static void
2376 arc_reclaim_thread(void)
2377 {
2378         clock_t                 growtime = 0;
2379         arc_reclaim_strategy_t  last_reclaim = ARC_RECLAIM_CONS;
2380         callb_cpr_t             cpr;
2381 
2382         CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2383 
2384         mutex_enter(&arc_reclaim_thr_lock);
2385         while (arc_thread_exit == 0) {
2386                 if (arc_reclaim_needed()) {
2387 
2388                         if (arc_no_grow) {
2389                                 if (last_reclaim == ARC_RECLAIM_CONS) {
2390                                         last_reclaim = ARC_RECLAIM_AGGR;
2391                                 } else {
2392                                         last_reclaim = ARC_RECLAIM_CONS;
2393                                 }
2394                         } else {
2395                                 arc_no_grow = TRUE;
2396                                 last_reclaim = ARC_RECLAIM_AGGR;
2397                                 membar_producer();
2398                         }
2399 
2400                         /* reset the growth delay for every reclaim */
2401                         growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2402 
2403                         arc_kmem_reap_now(last_reclaim);
2404                         arc_warm = B_TRUE;
2405 
2406                 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2407                         arc_no_grow = FALSE;
2408                 }
2409 
2410                 arc_adjust();
2411 
2412                 if (arc_eviction_list != NULL)
2413                         arc_do_user_evicts();
2414 
2415                 /* block until needed, or one second, whichever is shorter */
2416                 CALLB_CPR_SAFE_BEGIN(&cpr);
2417                 (void) cv_timedwait(&arc_reclaim_thr_cv,
2418                     &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2419                 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2420         }
2421 
2422         arc_thread_exit = 0;
2423         cv_broadcast(&arc_reclaim_thr_cv);
2424         CALLB_CPR_EXIT(&cpr);               /* drops arc_reclaim_thr_lock */
2425         thread_exit();
2426 }
2427 
2428 /*
2429  * Adapt arc info given the number of bytes we are trying to add and
2430  * the state that we are coming from.  This function is only called
2431  * when we are adding new content to the cache.
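      *
      * For example (hypothetical sizes): on a hit in the MRU ghost list with
      * arc_mru_ghost at 100MB and arc_mfu_ghost at 300MB, mult evaluates to 3
      * and arc_p grows by 3 * bytes (capped at arc_c - arc_p_min); a hit in
      * the MFU ghost list shrinks arc_p in the same proportion, floored at
      * arc_p_min.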
2432  */
2433 static void
2434 arc_adapt(int bytes, arc_state_t *state)
2435 {
2436         int mult;
2437         uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2438 
2439         if (state == arc_l2c_only)
2440                 return;
2441 
2442         ASSERT(bytes > 0);
2443         /*
2444          * Adapt the target size of the MRU list:
2445          *      - if we just hit in the MRU ghost list, then increase
2446          *        the target size of the MRU list.
2447          *      - if we just hit in the MFU ghost list, then increase
2448          *        the target size of the MFU list by decreasing the
2449          *        target size of the MRU list.
2450          */
2451         if (state == arc_mru_ghost) {
2452                 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2453                     1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2454                 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2455 
2456                 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2457         } else if (state == arc_mfu_ghost) {
2458                 uint64_t delta;
2459 
2460                 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2461                     1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2462                 mult = MIN(mult, 10);
2463 
2464                 delta = MIN(bytes * mult, arc_p);
2465                 arc_p = MAX(arc_p_min, arc_p - delta);
2466         }
2467         ASSERT((int64_t)arc_p >= 0);
2468 
2469         if (arc_reclaim_needed()) {
2470                 cv_signal(&arc_reclaim_thr_cv);
2471                 return;
2472         }
2473 
2474         if (arc_no_grow)
2475                 return;
2476 
2477         if (arc_c >= arc_c_max)
2478                 return;
2479 
2480         /*
2481          * If we're within (2 * maxblocksize) bytes of the target
2482          * cache size, increment the target cache size.
2483          */
2484         if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2485                 atomic_add_64(&arc_c, (int64_t)bytes);
2486                 if (arc_c > arc_c_max)
2487                         arc_c = arc_c_max;
2488                 else if (state == arc_anon)
2489                         atomic_add_64(&arc_p, (int64_t)bytes);
2490                 if (arc_p > arc_c)
2491                         arc_p = arc_c;
2492         }
2493         ASSERT((int64_t)arc_p >= 0);
2494 }
2495 
2496 /*
2497  * Check if the cache has reached its limits and eviction is required
2498  * prior to insert.
2499  */
2500 static int
2501 arc_evict_needed(arc_buf_contents_t type)
2502 {
2503         if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2504                 return (1);
2505 
2506         if (arc_reclaim_needed())
2507                 return (1);
2508 
2509         return (arc_size > arc_c);
2510 }
2511 
2512 /*
2513  * The buffer, supplied as the first argument, needs a data block.
2514  * So, if we are at cache max, determine which cache should be victimized.
2515  * We have the following cases:
2516  *
2517  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2518  * In this situation, if we're out of space but the resident size of the MFU is
2519  * under the limit, victimize the MFU cache to satisfy this insertion request.
2520  *
2521  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2522  * Here, we've used up all of the available space for the MRU, so we need to
2523  * evict from our own cache instead.  Evict from the set of resident MRU
2524  * entries.
2525  *
2526  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2527  * c minus p represents the MFU space in the cache, since p is the size of the
2528  * cache that is dedicated to the MRU.  In this situation there's still space on
2529  * the MFU side, so the MRU side needs to be victimized.
2530  *
2531  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2532  * MFU's resident set is consuming more space than it has been allotted.  In
2533  * this situation, we must victimize our own cache, the MFU, for this insertion.
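      *
      * A worked example (hypothetical numbers): with arc_c = 1GB and
      * arc_p = 600MB, an MRU insert while anon + MRU already hold 700MB falls
      * under case 2, so we evict from the MRU list itself; the same insert
      * with only 400MB resident falls under case 1 and steals space from the
      * MFU list, provided the MFU has enough evictable data of this type.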
2534  */
2535 static void
2536 arc_get_data_buf(arc_buf_t *buf)
2537 {
2538         arc_state_t             *state = buf->b_hdr->b_state;
2539         uint64_t                size = buf->b_hdr->b_size;
2540         arc_buf_contents_t      type = buf->b_hdr->b_type;
2541 
2542         arc_adapt(size, state);
2543 
2544         /*
2545          * If we have not yet reached the cache maximum size,
2546          * just allocate a new buffer.
2547          */
2548         if (!arc_evict_needed(type)) {
2549                 if (type == ARC_BUFC_METADATA) {
2550                         buf->b_data = zio_buf_alloc(size);
2551                         arc_space_consume(size, ARC_SPACE_DATA);
2552                 } else {
2553                         ASSERT(type == ARC_BUFC_DATA);
2554                         buf->b_data = zio_data_buf_alloc(size);
2555                         ARCSTAT_INCR(arcstat_data_size, size);
2556                         atomic_add_64(&arc_size, size);
2557                 }
2558                 goto out;
2559         }
2560 
2561         /*
2562          * If we are prefetching from the mfu ghost list, this buffer
2563          * will end up on the mru list, so steal space from there.
2564          */
2565         if (state == arc_mfu_ghost)
2566                 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2567         else if (state == arc_mru_ghost)
2568                 state = arc_mru;
2569 
2570         if (state == arc_mru || state == arc_anon) {
2571                 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2572                 state = (arc_mfu->arcs_lsize[type] >= size &&
2573                     arc_p > mru_used) ? arc_mfu : arc_mru;
2574         } else {
2575                 /* MFU cases */
2576                 uint64_t mfu_space = arc_c - arc_p;
2577                 state =  (arc_mru->arcs_lsize[type] >= size &&
2578                     mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2579         }
2580         if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2581                 if (type == ARC_BUFC_METADATA) {
2582                         buf->b_data = zio_buf_alloc(size);
2583                         arc_space_consume(size, ARC_SPACE_DATA);
2584                 } else {
2585                         ASSERT(type == ARC_BUFC_DATA);
2586                         buf->b_data = zio_data_buf_alloc(size);
2587                         ARCSTAT_INCR(arcstat_data_size, size);
2588                         atomic_add_64(&arc_size, size);
2589                 }
2590                 ARCSTAT_BUMP(arcstat_recycle_miss);
2591         }
2592         ASSERT(buf->b_data != NULL);
2593 out:
2594         /*
2595          * Update the state size.  Note that ghost states have a
2596          * "ghost size" and so don't need to be updated.
2597          */
2598         if (!GHOST_STATE(buf->b_hdr->b_state)) {
2599                 arc_buf_hdr_t *hdr = buf->b_hdr;
2600 
2601                 atomic_add_64(&hdr->b_state->arcs_size, size);
2602                 if (list_link_active(&hdr->b_arc_node)) {
2603                         ASSERT(refcount_is_zero(&hdr->b_refcnt));
2604                         atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2605                 }
2606                 /*
2607                  * If we are growing the cache, and we are adding anonymous
2608                  * data, and we have outgrown arc_p, update arc_p.
2609                  */
2610                 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2611                     arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2612                         arc_p = MIN(arc_c, arc_p + size);
2613         }
2614 }
2615 
2616 /*
2617  * This routine is called whenever a buffer is accessed.
2618  * NOTE: the hash lock must be held by the caller and remains held on return.
2619  */
2620 static void
2621 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2622 {
2623         clock_t now;
2624 
2625         ASSERT(MUTEX_HELD(hash_lock));
2626 
2627         if (buf->b_state == arc_anon) {
2628                 /*
2629                  * This buffer is not in the cache, and does not
2630                  * appear in our "ghost" list.  Add the new buffer
2631                  * to the MRU state.
2632                  */
2633 
2634                 ASSERT(buf->b_arc_access == 0);
2635                 buf->b_arc_access = ddi_get_lbolt();
2636                 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2637                 arc_change_state(arc_mru, buf, hash_lock);
2638 
2639         } else if (buf->b_state == arc_mru) {
2640                 now = ddi_get_lbolt();
2641 
2642                 /*
2643                  * If this buffer is here because of a prefetch, then either:
2644                  * - clear the flag if this is a "referencing" read
2645                  *   (any subsequent access will bump this into the MFU state).
2646                  * or
2647                  * - move the buffer to the head of the list if this is
2648                  *   another prefetch (to make it less likely to be evicted).
2649                  */
2650                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2651                         if (refcount_count(&buf->b_refcnt) == 0) {
2652                                 ASSERT(list_link_active(&buf->b_arc_node));
2653                         } else {
2654                                 buf->b_flags &= ~ARC_PREFETCH;
2655                                 ARCSTAT_BUMP(arcstat_mru_hits);
2656                         }
2657                         buf->b_arc_access = now;
2658                         return;
2659                 }
2660 
2661                 /*
2662                  * This buffer has been "accessed" only once so far,
2663                  * but it is still in the cache. Move it to the MFU
2664                  * state.
2665                  */
2666                 if (now > buf->b_arc_access + ARC_MINTIME) {
2667                         /*
2668                          * More than 125ms have passed since we
2669                          * instantiated this buffer.  Move it to the
2670                          * most frequently used state.
2671                          */
2672                         buf->b_arc_access = now;
2673                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2674                         arc_change_state(arc_mfu, buf, hash_lock);
2675                 }
2676                 ARCSTAT_BUMP(arcstat_mru_hits);
2677         } else if (buf->b_state == arc_mru_ghost) {
2678                 arc_state_t     *new_state;
2679                 /*
2680                  * This buffer has been "accessed" recently, but
2681                  * was evicted from the cache.  Move it to the
2682                  * MFU state.
2683                  */
2684 
2685                 if (buf->b_flags & ARC_PREFETCH) {
2686                         new_state = arc_mru;
2687                         if (refcount_count(&buf->b_refcnt) > 0)
2688                                 buf->b_flags &= ~ARC_PREFETCH;
2689                         DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2690                 } else {
2691                         new_state = arc_mfu;
2692                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2693                 }
2694 
2695                 buf->b_arc_access = ddi_get_lbolt();
2696                 arc_change_state(new_state, buf, hash_lock);
2697 
2698                 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2699         } else if (buf->b_state == arc_mfu) {
2700                 /*
2701                  * This buffer has been accessed more than once and is
2702                  * still in the cache.  Keep it in the MFU state.
2703                  *
2704                  * NOTE: an add_reference() that occurred when we did
2705                  * the arc_read() will have kicked this off the list.
2706                  * If it was a prefetch, we will explicitly move it to
2707                  * the head of the list now.
2708                  */
2709                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2710                         ASSERT(refcount_count(&buf->b_refcnt) == 0);
2711                         ASSERT(list_link_active(&buf->b_arc_node));
2712                 }
2713                 ARCSTAT_BUMP(arcstat_mfu_hits);
2714                 buf->b_arc_access = ddi_get_lbolt();
2715         } else if (buf->b_state == arc_mfu_ghost) {
2716                 arc_state_t     *new_state = arc_mfu;
2717                 /*
2718                  * This buffer has been accessed more than once but has
2719                  * been evicted from the cache.  Move it back to the
2720                  * MFU state.
2721                  */
2722 
2723                 if (buf->b_flags & ARC_PREFETCH) {
2724                         /*
2725                          * This is a prefetch access...
2726                          * move this block back to the MRU state.
2727                          */
2728                         ASSERT0(refcount_count(&buf->b_refcnt));
2729                         new_state = arc_mru;
2730                 }
2731 
2732                 buf->b_arc_access = ddi_get_lbolt();
2733                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2734                 arc_change_state(new_state, buf, hash_lock);
2735 
2736                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2737         } else if (buf->b_state == arc_l2c_only) {
2738                 /*
2739                  * This buffer is on the 2nd Level ARC.
2740                  */
2741 
2742                 buf->b_arc_access = ddi_get_lbolt();
2743                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2744                 arc_change_state(arc_mfu, buf, hash_lock);
2745         } else {
2746                 ASSERT(!"invalid arc state");
2747         }
2748 }
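
/*
 * Illustrative life cycle (added for clarity; not part of the original
 * code).  Assuming more than ARC_MINTIME elapses between the two demand
 * reads of a block B, the transitions above work out as follows:
 *
 *      read(B):   arc_anon         -> arc_mru   (first demand access)
 *      read(B):   arc_mru          -> arc_mfu   (re-accessed later)
 *      eviction:  arc_mru/arc_mfu  -> ghost     (data dropped, hdr kept)
 *      read(B):   ghost hit        -> arc_mfu   (reinstated from ghost)
 */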
2749 
2750 /* a generic arc_done_func_t which you can use */
2751 /* ARGSUSED */
2752 void
2753 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2754 {
2755         if (zio == NULL || zio->io_error == 0)
2756                 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2757         VERIFY(arc_buf_remove_ref(buf, arg));
2758 }
2759 
2760 /* a generic arc_done_func_t */
2761 void
2762 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2763 {
2764         arc_buf_t **bufp = arg;
2765         if (zio && zio->io_error) {
2766                 VERIFY(arc_buf_remove_ref(buf, arg));
2767                 *bufp = NULL;
2768         } else {
2769                 *bufp = buf;
2770                 ASSERT(buf->b_data);
2771         }
2772 }
2773 
2774 static void
2775 arc_read_done(zio_t *zio)
2776 {
2777         arc_buf_hdr_t   *hdr, *found;
2778         arc_buf_t       *buf;
2779         arc_buf_t       *abuf;  /* buffer we're assigning to callback */
2780         kmutex_t        *hash_lock;
2781         arc_callback_t  *callback_list, *acb;
2782         int             freeable = FALSE;
2783 
2784         buf = zio->io_private;
2785         hdr = buf->b_hdr;
2786 
2787         /*
2788          * The hdr was inserted into the hash table and removed from lists
2789          * prior to starting I/O.  We should find this header, since
2790          * it's in the hash table, and it should be legit since it's
2791          * not possible to evict it during the I/O.  The only possible
2792          * reason for it not to be found is if we were freed during the
2793          * read.
2794          */
2795         found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2796             &hash_lock);
2797 
2798         ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2799             (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2800             (found == hdr && HDR_L2_READING(hdr)));
2801 
2802         hdr->b_flags &= ~ARC_L2_EVICTED;
2803         if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2804                 hdr->b_flags &= ~ARC_L2CACHE;
2805 
2806         /* byteswap if necessary */
2807         callback_list = hdr->b_acb;
2808         ASSERT(callback_list != NULL);
2809         if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2810                 dmu_object_byteswap_t bswap =
2811                     DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2812                 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2813                     byteswap_uint64_array :
2814                     dmu_ot_byteswap[bswap].ob_func;
2815                 func(buf->b_data, hdr->b_size);
2816         }
2817 
2818         arc_cksum_compute(buf, B_FALSE);
2819         arc_buf_watch(buf);
2820 
2821         if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2822                 /*
2823                  * Only call arc_access on anonymous buffers.  This is because
2824                  * if we've issued an I/O for an evicted buffer, we've already
2825                  * called arc_access (to prevent any simultaneous readers from
2826                  * getting confused).
2827                  */
2828                 arc_access(hdr, hash_lock);
2829         }
2830 
2831         /* create copies of the data buffer for the callers */
2832         abuf = buf;
2833         for (acb = callback_list; acb; acb = acb->acb_next) {
2834                 if (acb->acb_done) {
2835                         if (abuf == NULL) {
2836                                 ARCSTAT_BUMP(arcstat_duplicate_reads);
2837                                 abuf = arc_buf_clone(buf);
2838                         }
2839                         acb->acb_buf = abuf;
2840                         abuf = NULL;
2841                 }
2842         }
2843         hdr->b_acb = NULL;
2844         hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2845         ASSERT(!HDR_BUF_AVAILABLE(hdr));
2846         if (abuf == buf) {
2847                 ASSERT(buf->b_efunc == NULL);
2848                 ASSERT(hdr->b_datacnt == 1);
2849                 hdr->b_flags |= ARC_BUF_AVAILABLE;
2850         }
2851 
2852         ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2853 
2854         if (zio->io_error != 0) {
2855                 hdr->b_flags |= ARC_IO_ERROR;
2856                 if (hdr->b_state != arc_anon)
2857                         arc_change_state(arc_anon, hdr, hash_lock);
2858                 if (HDR_IN_HASH_TABLE(hdr))
2859                         buf_hash_remove(hdr);
2860                 freeable = refcount_is_zero(&hdr->b_refcnt);
2861         }
2862 
2863         /*
2864          * Broadcast before we drop the hash_lock to avoid the possibility
2865          * that the hdr (and hence the cv) might be freed before we get to
2866          * the cv_broadcast().
2867          */
2868         cv_broadcast(&hdr->b_cv);
2869 
2870         if (hash_lock) {
2871                 mutex_exit(hash_lock);
2872         } else {
2873                 /*
2874                  * This block was freed while we waited for the read to
2875                  * complete.  It has been removed from the hash table and
2876                  * moved to the anonymous state (so that it won't show up
2877                  * in the cache).
2878                  */
2879                 ASSERT3P(hdr->b_state, ==, arc_anon);
2880                 freeable = refcount_is_zero(&hdr->b_refcnt);
2881         }
2882 
2883         /* execute each callback and free its structure */
2884         while ((acb = callback_list) != NULL) {
2885                 if (acb->acb_done)
2886                         acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2887 
2888                 if (acb->acb_zio_dummy != NULL) {
2889                         acb->acb_zio_dummy->io_error = zio->io_error;
2890                         zio_nowait(acb->acb_zio_dummy);
2891                 }
2892 
2893                 callback_list = acb->acb_next;
2894                 kmem_free(acb, sizeof (arc_callback_t));
2895         }
2896 
2897         if (freeable)
2898                 arc_hdr_destroy(hdr);
2899 }
2900 
2901 /*
2902  * "Read" the block at the specified DVA (in bp) via the
2903  * cache.  If the block is found in the cache, invoke the provided
2904  * callback immediately and return.  Note that the `zio' parameter
2905  * in the callback will be NULL in this case, since no IO was
2906  * required.  If the block is not in the cache, pass the read request
2907  * on to the spa with a substitute callback function, so that the
2908  * requested block will be added to the cache.
2909  *
2910  * If a read request arrives for a block that has a read in progress,
2911  * either wait for the in-progress read to complete (and return the
2912  * results); or, if this is a read with a "done" func, add a record
2913  * to the read to invoke the "done" func when the read completes,
2914  * and return; or just return.
2915  *
2916  * arc_read_done() will invoke all the requested "done" functions
2917  * for readers of this block.
2918  */
2919 int
2920 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
2921     void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
2922     const zbookmark_t *zb)
2923 {
2924         arc_buf_hdr_t *hdr;
2925         arc_buf_t *buf = NULL;
2926         kmutex_t *hash_lock;
2927         zio_t *rzio;
2928         uint64_t guid = spa_load_guid(spa);
2929 
2930 top:
2931         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2932             &hash_lock);
2933         if (hdr && hdr->b_datacnt > 0) {
2934 
2935                 *arc_flags |= ARC_CACHED;
2936 
2937                 if (HDR_IO_IN_PROGRESS(hdr)) {
2938 
2939                         if (*arc_flags & ARC_WAIT) {
2940                                 cv_wait(&hdr->b_cv, hash_lock);
2941                                 mutex_exit(hash_lock);
2942                                 goto top;
2943                         }
2944                         ASSERT(*arc_flags & ARC_NOWAIT);
2945 
2946                         if (done) {
2947                                 arc_callback_t  *acb = NULL;
2948 
2949                                 acb = kmem_zalloc(sizeof (arc_callback_t),
2950                                     KM_SLEEP);
2951                                 acb->acb_done = done;
2952                                 acb->acb_private = private;
2953                                 if (pio != NULL)
2954                                         acb->acb_zio_dummy = zio_null(pio,
2955                                             spa, NULL, NULL, NULL, zio_flags);
2956 
2957                                 ASSERT(acb->acb_done != NULL);
2958                                 acb->acb_next = hdr->b_acb;
2959                                 hdr->b_acb = acb;
2960                                 add_reference(hdr, hash_lock, private);
2961                                 mutex_exit(hash_lock);
2962                                 return (0);
2963                         }
2964                         mutex_exit(hash_lock);
2965                         return (0);
2966                 }
2967 
2968                 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2969 
2970                 if (done) {
2971                         add_reference(hdr, hash_lock, private);
2972                         /*
2973                          * If this block is already in use, create a new
2974                          * copy of the data so that we will be guaranteed
2975                          * that arc_release() will always succeed.
2976                          */
2977                         buf = hdr->b_buf;
2978                         ASSERT(buf);
2979                         ASSERT(buf->b_data);
2980                         if (HDR_BUF_AVAILABLE(hdr)) {
2981                                 ASSERT(buf->b_efunc == NULL);
2982                                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2983                         } else {
2984                                 buf = arc_buf_clone(buf);
2985                         }
2986 
2987                 } else if (*arc_flags & ARC_PREFETCH &&
2988                     refcount_count(&hdr->b_refcnt) == 0) {
2989                         hdr->b_flags |= ARC_PREFETCH;
2990                 }
2991                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2992                 arc_access(hdr, hash_lock);
2993                 if (*arc_flags & ARC_L2CACHE)
2994                         hdr->b_flags |= ARC_L2CACHE;
2995                 if (*arc_flags & ARC_L2COMPRESS)
2996                         hdr->b_flags |= ARC_L2COMPRESS;
2997                 mutex_exit(hash_lock);
2998                 ARCSTAT_BUMP(arcstat_hits);
2999                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3000                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3001                     data, metadata, hits);
3002 
3003                 if (done)
3004                         done(NULL, buf, private);
3005         } else {
3006                 uint64_t size = BP_GET_LSIZE(bp);
3007                 arc_callback_t  *acb;
3008                 vdev_t *vd = NULL;
3009                 uint64_t addr = 0;
3010                 boolean_t devw = B_FALSE;
3011 
3012                 if (hdr == NULL) {
3013                         /* this block is not in the cache */
3014                         arc_buf_hdr_t   *exists;
3015                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
3016                         buf = arc_buf_alloc(spa, size, private, type);
3017                         hdr = buf->b_hdr;
3018                         hdr->b_dva = *BP_IDENTITY(bp);
3019                         hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
3020                         hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
3021                         exists = buf_hash_insert(hdr, &hash_lock);
3022                         if (exists) {
3023                                 /* somebody beat us to the hash insert */
3024                                 mutex_exit(hash_lock);
3025                                 buf_discard_identity(hdr);
3026                                 (void) arc_buf_remove_ref(buf, private);
3027                                 goto top; /* restart the IO request */
3028                         }
3029                         /* if this is a prefetch, we don't have a reference */
3030                         if (*arc_flags & ARC_PREFETCH) {
3031                                 (void) remove_reference(hdr, hash_lock,
3032                                     private);
3033                                 hdr->b_flags |= ARC_PREFETCH;
3034                         }
3035                         if (*arc_flags & ARC_L2CACHE)
3036                                 hdr->b_flags |= ARC_L2CACHE;
3037                         if (*arc_flags & ARC_L2COMPRESS)
3038                                 hdr->b_flags |= ARC_L2COMPRESS;
3039                         if (BP_GET_LEVEL(bp) > 0)
3040                                 hdr->b_flags |= ARC_INDIRECT;
3041                 } else {
3042                         /* this block is in the ghost cache */
3043                         ASSERT(GHOST_STATE(hdr->b_state));
3044                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3045                         ASSERT0(refcount_count(&hdr->b_refcnt));
3046                         ASSERT(hdr->b_buf == NULL);
3047 
3048                         /* if this is a prefetch, we don't have a reference */
3049                         if (*arc_flags & ARC_PREFETCH)
3050                                 hdr->b_flags |= ARC_PREFETCH;
3051                         else
3052                                 add_reference(hdr, hash_lock, private);
3053                         if (*arc_flags & ARC_L2CACHE)
3054                                 hdr->b_flags |= ARC_L2CACHE;
3055                         if (*arc_flags & ARC_L2COMPRESS)
3056                                 hdr->b_flags |= ARC_L2COMPRESS;
3057                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3058                         buf->b_hdr = hdr;
3059                         buf->b_data = NULL;
3060                         buf->b_efunc = NULL;
3061                         buf->b_private = NULL;
3062                         buf->b_next = NULL;
3063                         hdr->b_buf = buf;
3064                         ASSERT(hdr->b_datacnt == 0);
3065                         hdr->b_datacnt = 1;
3066                         arc_get_data_buf(buf);
3067                         arc_access(hdr, hash_lock);
3068                 }
3069 
3070                 ASSERT(!GHOST_STATE(hdr->b_state));
3071 
3072                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3073                 acb->acb_done = done;
3074                 acb->acb_private = private;
3075 
3076                 ASSERT(hdr->b_acb == NULL);
3077                 hdr->b_acb = acb;
3078                 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3079 
3080                 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
3081                     (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3082                         devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3083                         addr = hdr->b_l2hdr->b_daddr;
3084                         /*
3085                          * Lock out device removal.
3086                          */
3087                         if (vdev_is_dead(vd) ||
3088                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3089                                 vd = NULL;
3090                 }
3091 
3092                 mutex_exit(hash_lock);
3093 
3094                 /*
3095                  * At this point, we have a level 1 cache miss.  Try again in
3096                  * L2ARC if possible.
3097                  */
3098                 ASSERT3U(hdr->b_size, ==, size);
3099                 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3100                     uint64_t, size, zbookmark_t *, zb);
3101                 ARCSTAT_BUMP(arcstat_misses);
3102                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3103                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3104                     data, metadata, misses);
3105 
3106                 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3107                         /*
3108                          * Read from the L2ARC if the following are true:
3109                          * 1. The L2ARC vdev was previously cached.
3110                          * 2. This buffer still has L2ARC metadata.
3111                          * 3. This buffer isn't currently writing to the L2ARC.
3112                          * 4. The L2ARC entry wasn't evicted, which may
3113                          *    also have invalidated the vdev.
3114                          * 5. This isn't a prefetch with l2arc_noprefetch set.
3115                          */
3116                         if (hdr->b_l2hdr != NULL &&
3117                             !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3118                             !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3119                                 l2arc_read_callback_t *cb;
3120 
3121                                 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3122                                 ARCSTAT_BUMP(arcstat_l2_hits);
3123 
3124                                 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3125                                     KM_SLEEP);
3126                                 cb->l2rcb_buf = buf;
3127                                 cb->l2rcb_spa = spa;
3128                                 cb->l2rcb_bp = *bp;
3129                                 cb->l2rcb_zb = *zb;
3130                                 cb->l2rcb_flags = zio_flags;
3131                                 cb->l2rcb_compress = hdr->b_l2hdr->b_compress;
3132 
3133                                 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3134                                     addr + size < vd->vdev_psize -
3135                                     VDEV_LABEL_END_SIZE);
3136 
3137                                 /*
3138                                  * l2arc read.  The SCL_L2ARC lock will be
3139                                  * released by l2arc_read_done().
3140                                  * Issue a null zio if the underlying buffer
3141                                  * was squashed to zero size by compression.
3142                                  */
3143                                 if (hdr->b_l2hdr->b_compress ==
3144                                     ZIO_COMPRESS_EMPTY) {
3145                                         rzio = zio_null(pio, spa, vd,
3146                                             l2arc_read_done, cb,
3147                                             zio_flags | ZIO_FLAG_DONT_CACHE |
3148                                             ZIO_FLAG_CANFAIL |
3149                                             ZIO_FLAG_DONT_PROPAGATE |
3150                                             ZIO_FLAG_DONT_RETRY);
3151                                 } else {
3152                                         rzio = zio_read_phys(pio, vd, addr,
3153                                             hdr->b_l2hdr->b_asize,
3154                                             buf->b_data, ZIO_CHECKSUM_OFF,
3155                                             l2arc_read_done, cb, priority,
3156                                             zio_flags | ZIO_FLAG_DONT_CACHE |
3157                                             ZIO_FLAG_CANFAIL |
3158                                             ZIO_FLAG_DONT_PROPAGATE |
3159                                             ZIO_FLAG_DONT_RETRY, B_FALSE);
3160                                 }
3161                                 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3162                                     zio_t *, rzio);
3163                                 ARCSTAT_INCR(arcstat_l2_read_bytes,
3164                                     hdr->b_l2hdr->b_asize);
3165 
3166                                 if (*arc_flags & ARC_NOWAIT) {
3167                                         zio_nowait(rzio);
3168                                         return (0);
3169                                 }
3170 
3171                                 ASSERT(*arc_flags & ARC_WAIT);
3172                                 if (zio_wait(rzio) == 0)
3173                                         return (0);
3174 
3175                                 /* l2arc read error; goto zio_read() */
3176                         } else {
3177                                 DTRACE_PROBE1(l2arc__miss,
3178                                     arc_buf_hdr_t *, hdr);
3179                                 ARCSTAT_BUMP(arcstat_l2_misses);
3180                                 if (HDR_L2_WRITING(hdr))
3181                                         ARCSTAT_BUMP(arcstat_l2_rw_clash);
3182                                 spa_config_exit(spa, SCL_L2ARC, vd);
3183                         }
3184                 } else {
3185                         if (vd != NULL)
3186                                 spa_config_exit(spa, SCL_L2ARC, vd);
3187                         if (l2arc_ndev != 0) {
3188                                 DTRACE_PROBE1(l2arc__miss,
3189                                     arc_buf_hdr_t *, hdr);
3190                                 ARCSTAT_BUMP(arcstat_l2_misses);
3191                         }
3192                 }
3193 
3194                 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3195                     arc_read_done, buf, priority, zio_flags, zb);
3196 
3197                 if (*arc_flags & ARC_WAIT)
3198                         return (zio_wait(rzio));
3199 
3200                 ASSERT(*arc_flags & ARC_NOWAIT);
3201                 zio_nowait(rzio);
3202         }
3203         return (0);
3204 }
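
/*
 * Illustrative caller sketch (added for clarity; not part of the original
 * code).  A typical synchronous read can use the generic arc_getbuf_func()
 * above as the "done" callback; "spa", "bp" and "zb" are assumed to be
 * supplied by the caller:
 *
 *      arc_buf_t *abuf = NULL;
 *      uint32_t aflags = ARC_WAIT;
 *      int err;
 *
 *      err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *          ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *      if (err == 0 && abuf != NULL) {
 *              ... consume BP_GET_LSIZE(bp) bytes at abuf->b_data ...
 *              VERIFY(arc_buf_remove_ref(abuf, &abuf));
 *      }
 */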
3205 
3206 void
3207 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3208 {
3209         ASSERT(buf->b_hdr != NULL);
3210         ASSERT(buf->b_hdr->b_state != arc_anon);
3211         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3212         ASSERT(buf->b_efunc == NULL);
3213         ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3214 
3215         buf->b_efunc = func;
3216         buf->b_private = private;
3217 }
3218 
3219 /*
3220  * Notify the arc that a block was freed, and thus will never be used again.
3221  */
3222 void
3223 arc_freed(spa_t *spa, const blkptr_t *bp)
3224 {
3225         arc_buf_hdr_t *hdr;
3226         kmutex_t *hash_lock;
3227         uint64_t guid = spa_load_guid(spa);
3228 
3229         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
3230             &hash_lock);
3231         if (hdr == NULL)
3232                 return;
3233         if (HDR_BUF_AVAILABLE(hdr)) {
3234                 arc_buf_t *buf = hdr->b_buf;
3235                 add_reference(hdr, hash_lock, FTAG);
3236                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3237                 mutex_exit(hash_lock);
3238 
3239                 arc_release(buf, FTAG);
3240                 (void) arc_buf_remove_ref(buf, FTAG);
3241         } else {
3242                 mutex_exit(hash_lock);
3243         }
3244 
3245 }
3246 
3247 /*
3248  * This is used by the DMU to let the ARC know that a buffer is
3249  * being evicted, so the ARC should clean up.  If this arc buf
3250  * is not yet in the evicted state, it will be put there.
3251  */
3252 int
3253 arc_buf_evict(arc_buf_t *buf)
3254 {
3255         arc_buf_hdr_t *hdr;
3256         kmutex_t *hash_lock;
3257         arc_buf_t **bufp;
3258 
3259         mutex_enter(&buf->b_evict_lock);
3260         hdr = buf->b_hdr;
3261         if (hdr == NULL) {
3262                 /*
3263                  * We are in arc_do_user_evicts().
3264                  */
3265                 ASSERT(buf->b_data == NULL);
3266                 mutex_exit(&buf->b_evict_lock);
3267                 return (0);
3268         } else if (buf->b_data == NULL) {
3269                 arc_buf_t copy = *buf; /* structure assignment */
3270                 /*
3271                  * We are on the eviction list; process this buffer now
3272                  * but let arc_do_user_evicts() do the reaping.
3273                  */
3274                 buf->b_efunc = NULL;
3275                 mutex_exit(&buf->b_evict_lock);
3276                 VERIFY(copy.b_efunc(&copy) == 0);
3277                 return (1);
3278         }
3279         hash_lock = HDR_LOCK(hdr);
3280         mutex_enter(hash_lock);
3281         hdr = buf->b_hdr;
3282         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3283 
3284         ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3285         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3286 
3287         /*
3288          * Pull this buffer off of the hdr
3289          */
3290         bufp = &hdr->b_buf;
3291         while (*bufp != buf)
3292                 bufp = &(*bufp)->b_next;
3293         *bufp = buf->b_next;
3294 
3295         ASSERT(buf->b_data != NULL);
3296         arc_buf_destroy(buf, FALSE, FALSE);
3297 
3298         if (hdr->b_datacnt == 0) {
3299                 arc_state_t *old_state = hdr->b_state;
3300                 arc_state_t *evicted_state;
3301 
3302                 ASSERT(hdr->b_buf == NULL);
3303                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3304 
3305                 evicted_state =
3306                     (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3307 
3308                 mutex_enter(&old_state->arcs_mtx);
3309                 mutex_enter(&evicted_state->arcs_mtx);
3310 
3311                 arc_change_state(evicted_state, hdr, hash_lock);
3312                 ASSERT(HDR_IN_HASH_TABLE(hdr));
3313                 hdr->b_flags |= ARC_IN_HASH_TABLE;
3314                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3315 
3316                 mutex_exit(&evicted_state->arcs_mtx);
3317                 mutex_exit(&old_state->arcs_mtx);
3318         }
3319         mutex_exit(hash_lock);
3320         mutex_exit(&buf->b_evict_lock);
3321 
3322         VERIFY(buf->b_efunc(buf) == 0);
3323         buf->b_efunc = NULL;
3324         buf->b_private = NULL;
3325         buf->b_hdr = NULL;
3326         buf->b_next = NULL;
3327         kmem_cache_free(buf_cache, buf);
3328         return (1);
3329 }
3330 
3331 /*
3332  * Release this buffer from the cache, making it an anonymous buffer.  This
3333  * must be done after a read and prior to modifying the buffer contents.
3334  * If the buffer has more than one reference, we must make
3335  * a new hdr for the buffer.
3336  */
3337 void
3338 arc_release(arc_buf_t *buf, void *tag)
3339 {
3340         arc_buf_hdr_t *hdr;
3341         kmutex_t *hash_lock = NULL;
3342         l2arc_buf_hdr_t *l2hdr;
3343         uint64_t buf_size;
3344 
3345         /*
3346          * It would be nice to assert that if it's DMU metadata (level >
3347          * 0 || it's the dnode file), then it must be syncing context.
3348          * But we don't know that information at this level.
3349          */
3350 
3351         mutex_enter(&buf->b_evict_lock);
3352         hdr = buf->b_hdr;
3353 
3354         /* this buffer is not on any list */
3355         ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3356 
3357         if (hdr->b_state == arc_anon) {
3358                 /* this buffer is already released */
3359                 ASSERT(buf->b_efunc == NULL);
3360         } else {
3361                 hash_lock = HDR_LOCK(hdr);
3362                 mutex_enter(hash_lock);
3363                 hdr = buf->b_hdr;
3364                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3365         }
3366 
3367         l2hdr = hdr->b_l2hdr;
3368         if (l2hdr) {
3369                 mutex_enter(&l2arc_buflist_mtx);
3370                 hdr->b_l2hdr = NULL;
3371         }
3372         buf_size = hdr->b_size;
3373 
3374         /*
3375          * Do we have more than one buf?
3376          */
3377         if (hdr->b_datacnt > 1) {
3378                 arc_buf_hdr_t *nhdr;
3379                 arc_buf_t **bufp;
3380                 uint64_t blksz = hdr->b_size;
3381                 uint64_t spa = hdr->b_spa;
3382                 arc_buf_contents_t type = hdr->b_type;
3383                 uint32_t flags = hdr->b_flags;
3384 
3385                 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3386                 /*
3387                  * Pull the data off of this hdr and attach it to
3388                  * a new anonymous hdr.
3389                  */
3390                 (void) remove_reference(hdr, hash_lock, tag);
3391                 bufp = &hdr->b_buf;
3392                 while (*bufp != buf)
3393                         bufp = &(*bufp)->b_next;
3394                 *bufp = buf->b_next;
3395                 buf->b_next = NULL;
3396 
3397                 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3398                 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3399                 if (refcount_is_zero(&hdr->b_refcnt)) {
3400                         uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3401                         ASSERT3U(*size, >=, hdr->b_size);
3402                         atomic_add_64(size, -hdr->b_size);
3403                 }
3404 
3405                 /*
3406                  * If we're releasing a duplicate user data buffer,
3407                  * update our statistics accordingly.
3408                  */
3409                 if (hdr->b_type == ARC_BUFC_DATA) {
3410                         ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3411                         ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3412                             -hdr->b_size);
3413                 }
3414                 hdr->b_datacnt -= 1;
3415                 arc_cksum_verify(buf);
3416                 arc_buf_unwatch(buf);
3417 
3418                 mutex_exit(hash_lock);
3419 
3420                 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3421                 nhdr->b_size = blksz;
3422                 nhdr->b_spa = spa;
3423                 nhdr->b_type = type;
3424                 nhdr->b_buf = buf;
3425                 nhdr->b_state = arc_anon;
3426                 nhdr->b_arc_access = 0;
3427                 nhdr->b_flags = flags & ARC_L2_WRITING;
3428                 nhdr->b_l2hdr = NULL;
3429                 nhdr->b_datacnt = 1;
3430                 nhdr->b_freeze_cksum = NULL;
3431                 (void) refcount_add(&nhdr->b_refcnt, tag);
3432                 buf->b_hdr = nhdr;
3433                 mutex_exit(&buf->b_evict_lock);
3434                 atomic_add_64(&arc_anon->arcs_size, blksz);
3435         } else {
3436                 mutex_exit(&buf->b_evict_lock);
3437                 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3438                 ASSERT(!list_link_active(&hdr->b_arc_node));
3439                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3440                 if (hdr->b_state != arc_anon)
3441                         arc_change_state(arc_anon, hdr, hash_lock);
3442                 hdr->b_arc_access = 0;
3443                 if (hash_lock)
3444                         mutex_exit(hash_lock);
3445 
3446                 buf_discard_identity(hdr);
3447                 arc_buf_thaw(buf);
3448         }
3449         buf->b_efunc = NULL;
3450         buf->b_private = NULL;
3451 
3452         if (l2hdr) {
3453                 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3454                 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3455                 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3456                 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3457                 mutex_exit(&l2arc_buflist_mtx);
3458         }
3459 }
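
/*
 * Illustrative caller sketch (added for clarity; not part of the original
 * code).  Before dirtying a buffer obtained from arc_read(), a caller
 * releases it so the data becomes a private anonymous copy; "buf" and
 * "tag" (the tag already holding a reference) are assumed:
 *
 *      arc_release(buf, tag);
 *      ASSERT(arc_released(buf));
 *      ... buf->b_data may now be modified without affecting other readers ...
 */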
3460 
3461 int
3462 arc_released(arc_buf_t *buf)
3463 {
3464         int released;
3465 
3466         mutex_enter(&buf->b_evict_lock);
3467         released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3468         mutex_exit(&buf->b_evict_lock);
3469         return (released);
3470 }
3471 
3472 int
3473 arc_has_callback(arc_buf_t *buf)
3474 {
3475         int callback;
3476 
3477         mutex_enter(&buf->b_evict_lock);
3478         callback = (buf->b_efunc != NULL);
3479         mutex_exit(&buf->b_evict_lock);
3480         return (callback);
3481 }
3482 
3483 #ifdef ZFS_DEBUG
3484 int
3485 arc_referenced(arc_buf_t *buf)
3486 {
3487         int referenced;
3488 
3489         mutex_enter(&buf->b_evict_lock);
3490         referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3491         mutex_exit(&buf->b_evict_lock);
3492         return (referenced);
3493 }
3494 #endif
3495 
3496 static void
3497 arc_write_ready(zio_t *zio)
3498 {
3499         arc_write_callback_t *callback = zio->io_private;
3500         arc_buf_t *buf = callback->awcb_buf;
3501         arc_buf_hdr_t *hdr = buf->b_hdr;
3502 
3503         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3504         callback->awcb_ready(zio, buf, callback->awcb_private);
3505 
3506         /*
3507          * If the IO is already in progress, then this is a re-write
3508          * attempt, so we need to thaw and re-compute the cksum.
3509          * It is the responsibility of the callback to handle the
3510          * accounting for any re-write attempt.
3511          */
3512         if (HDR_IO_IN_PROGRESS(hdr)) {
3513                 mutex_enter(&hdr->b_freeze_lock);
3514                 if (hdr->b_freeze_cksum != NULL) {
3515                         kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3516                         hdr->b_freeze_cksum = NULL;
3517                 }
3518                 mutex_exit(&hdr->b_freeze_lock);
3519         }
3520         arc_cksum_compute(buf, B_FALSE);
3521         hdr->b_flags |= ARC_IO_IN_PROGRESS;
3522 }
3523 
3524 /*
3525  * The SPA calls this callback for each physical write that happens on behalf
3526  * of a logical write.  See the comment in dbuf_write_physdone() for details.
3527  */
3528 static void
3529 arc_write_physdone(zio_t *zio)
3530 {
3531         arc_write_callback_t *cb = zio->io_private;
3532         if (cb->awcb_physdone != NULL)
3533                 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
3534 }
3535 
3536 static void
3537 arc_write_done(zio_t *zio)
3538 {
3539         arc_write_callback_t *callback = zio->io_private;
3540         arc_buf_t *buf = callback->awcb_buf;
3541         arc_buf_hdr_t *hdr = buf->b_hdr;
3542 
3543         ASSERT(hdr->b_acb == NULL);
3544 
3545         if (zio->io_error == 0) {
3546                 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3547                 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3548                 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3549         } else {
3550                 ASSERT(BUF_EMPTY(hdr));
3551         }
3552 
3553         /*
3554          * If the block to be written was all-zero, we may have
3555          * compressed it away.  In this case no write was performed
3556          * so there will be no dva/birth/checksum.  The buffer must
3557          * therefore remain anonymous (and uncached).
3558          */
3559         if (!BUF_EMPTY(hdr)) {
3560                 arc_buf_hdr_t *exists;
3561                 kmutex_t *hash_lock;
3562 
3563                 ASSERT(zio->io_error == 0);
3564 
3565                 arc_cksum_verify(buf);
3566 
3567                 exists = buf_hash_insert(hdr, &hash_lock);
3568                 if (exists) {
3569                         /*
3570                          * This can only happen if we overwrite for
3571                          * sync-to-convergence, because we remove
3572                          * buffers from the hash table when we arc_free().
3573                          */
3574                         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3575                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3576                                         panic("bad overwrite, hdr=%p exists=%p",
3577                                             (void *)hdr, (void *)exists);
3578                                 ASSERT(refcount_is_zero(&exists->b_refcnt));
3579                                 arc_change_state(arc_anon, exists, hash_lock);
3580                                 mutex_exit(hash_lock);
3581                                 arc_hdr_destroy(exists);
3582                                 exists = buf_hash_insert(hdr, &hash_lock);
3583                                 ASSERT3P(exists, ==, NULL);
3584                         } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3585                                 /* nopwrite */
3586                                 ASSERT(zio->io_prop.zp_nopwrite);
3587                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3588                                         panic("bad nopwrite, hdr=%p exists=%p",
3589                                             (void *)hdr, (void *)exists);
3590                         } else {
3591                                 /* Dedup */
3592                                 ASSERT(hdr->b_datacnt == 1);
3593                                 ASSERT(hdr->b_state == arc_anon);
3594                                 ASSERT(BP_GET_DEDUP(zio->io_bp));
3595                                 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3596                         }
3597                 }
3598                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3599                 /* if it's not anon, we are doing a scrub */
3600                 if (!exists && hdr->b_state == arc_anon)
3601                         arc_access(hdr, hash_lock);
3602                 mutex_exit(hash_lock);
3603         } else {
3604                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3605         }
3606 
3607         ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3608         callback->awcb_done(zio, buf, callback->awcb_private);
3609 
3610         kmem_free(callback, sizeof (arc_write_callback_t));
3611 }
3612 
3613 zio_t *
3614 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3615     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3616     const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
3617     arc_done_func_t *done, void *private, zio_priority_t priority,
3618     int zio_flags, const zbookmark_t *zb)
3619 {
3620         arc_buf_hdr_t *hdr = buf->b_hdr;
3621         arc_write_callback_t *callback;
3622         zio_t *zio;
3623 
3624         ASSERT(ready != NULL);
3625         ASSERT(done != NULL);
3626         ASSERT(!HDR_IO_ERROR(hdr));
3627         ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3628         ASSERT(hdr->b_acb == NULL);
3629         if (l2arc)
3630                 hdr->b_flags |= ARC_L2CACHE;
3631         if (l2arc_compress)
3632                 hdr->b_flags |= ARC_L2COMPRESS;
3633         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3634         callback->awcb_ready = ready;
3635         callback->awcb_physdone = physdone;
3636         callback->awcb_done = done;
3637         callback->awcb_private = private;
3638         callback->awcb_buf = buf;
3639 
3640         zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3641             arc_write_ready, arc_write_physdone, arc_write_done, callback,
3642             priority, zio_flags, zb);
3643 
3644         return (zio);
3645 }
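
/*
 * Illustrative caller sketch (added for clarity; not part of the original
 * code).  A caller such as the DMU hands an anonymous (released) buffer
 * to arc_write() along with its own ready/physdone/done callbacks, then
 * issues the returned zio; "pio", "spa", "txg", "bp", "buf", "zp", "zb"
 * and the my_* names are assumed:
 *
 *      zio_t *wzio = arc_write(pio, spa, txg, bp, buf,
 *          B_TRUE, B_FALSE, zp, my_write_ready, my_write_physdone,
 *          my_write_done, my_arg, ZIO_PRIORITY_ASYNC_WRITE,
 *          ZIO_FLAG_MUSTSUCCEED, zb);
 *      zio_nowait(wzio);
 */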
3646 
3647 static int
3648 arc_memory_throttle(uint64_t reserve, uint64_t txg)
3649 {
3650 #ifdef _KERNEL
3651         uint64_t available_memory = ptob(freemem);
3652         static uint64_t page_load = 0;
3653         static uint64_t last_txg = 0;
3654 
3655 #if defined(__i386)
3656         available_memory =
3657             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3658 #endif
3659 
3660         if (freemem > physmem * arc_lotsfree_percent / 100)
3661                 return (0);
3662 
3663         if (txg > last_txg) {
3664                 last_txg = txg;
3665                 page_load = 0;
3666         }
3667         /*
3668          * If we are in pageout, we know that memory is already tight
3669          * and the ARC is already going to be evicting, so we just want
3670          * to continue to let page writes occur as quickly as possible.
3671          */
3672         if (curproc == proc_pageout) {
3673                 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3674                         return (SET_ERROR(ERESTART));
3675                 /* Note: reserve is inflated, so we deflate */
3676                 page_load += reserve / 8;
3677                 return (0);
3678         } else if (page_load > 0 && arc_reclaim_needed()) {
3679                 /* memory is low, delay before restarting */
3680                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3681                 return (SET_ERROR(EAGAIN));
3682         }
3683         page_load = 0;
3684 #endif
3685         return (0);
3686 }
3687 
3688 void
3689 arc_tempreserve_clear(uint64_t reserve)
3690 {
3691         atomic_add_64(&arc_tempreserve, -reserve);
3692         ASSERT((int64_t)arc_tempreserve >= 0);
3693 }
3694 
3695 int
3696 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3697 {
3698         int error;
3699         uint64_t anon_size;
3700 
3701         if (reserve > arc_c/4 && !arc_no_grow)
3702                 arc_c = MIN(arc_c_max, reserve * 4);
3703         if (reserve > arc_c)
3704                 return (SET_ERROR(ENOMEM));
3705 
3706         /*
3707          * Don't count loaned bufs as in flight dirty data to prevent long
3708          * network delays from blocking transactions that are ready to be
3709          * assigned to a txg.
3710          */
3711         anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3712 
3713         /*
3714          * Writes will, almost always, require additional memory allocations
3715          * in order to compress/encrypt/etc the data.  We therefore need to
3716          * make sure that there is sufficient available memory for this.
3717          */
3718         error = arc_memory_throttle(reserve, txg);
3719         if (error != 0)
3720                 return (error);
3721 
3722         /*
3723          * Throttle writes when the amount of dirty data in the cache
3724          * gets too large.  We try to keep the cache less than half full
3725          * of dirty blocks so that our sync times don't grow too large.
3726          * Note: if two requests come in concurrently, we might let them
3727          * both succeed, when one of them should fail.  Not a huge deal.
3728          */
3729 
3730         if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3731             anon_size > arc_c / 4) {
3732                 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3733                     "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3734                     arc_tempreserve>>10,
3735                     arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3736                     arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3737                     reserve>>10, arc_c>>10);
3738                 return (SET_ERROR(ERESTART));
3739         }
3740         atomic_add_64(&arc_tempreserve, reserve);
3741         return (0);
3742 }
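
/*
 * Worked example (added for illustration; the figures are assumed, not
 * taken from the code): with arc_c at 1 GB and no reservations
 * outstanding, a 64 MB reserve is accepted.  Reservations begin failing
 * with ERESTART once in-flight anonymous data exceeds arc_c / 4 (256 MB)
 * and reserve + arc_tempreserve + anon_size together exceed arc_c / 2
 * (512 MB); they succeed again after sync context writes the dirty
 * data out.
 */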
3743 
3744 void
3745 arc_init(void)
3746 {
3747         mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3748         cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3749 
3750         /* Convert seconds to clock ticks */
3751         arc_min_prefetch_lifespan = 1 * hz;
3752 
3753         /* Start out with 1/8 of all memory */
3754         arc_c = physmem * PAGESIZE / 8;
3755 
3756 #ifdef _KERNEL
3757         /*
3758          * On architectures where the physical memory can be larger
3759          * than the addressable space (intel in 32-bit mode), we may
3760          * need to limit the cache to 1/8 of VM size.
3761          */
3762         arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3763 #endif
3764 
3765         /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3766         arc_c_min = MAX(arc_c / 4, 64<<20);
3767         /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3768         if (arc_c * 8 >= 1<<30)
3769                 arc_c_max = (arc_c * 8) - (1<<30);
3770         else
3771                 arc_c_max = arc_c_min;
3772         arc_c_max = MAX(arc_c * 6, arc_c_max);
3773 
3774         /*
3775          * Allow the tunables to override our calculations if they are
3776          * reasonable (i.e. over 64MB)
3777          */
3778         if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3779                 arc_c_max = zfs_arc_max;
3780         if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3781                 arc_c_min = zfs_arc_min;
3782 
3783         arc_c = arc_c_max;
3784         arc_p = (arc_c >> 1);
3785 
3786         /* limit meta-data to 1/4 of the arc capacity */
3787         arc_meta_limit = arc_c_max / 4;
3788 
3789         /* Allow the tunable to override if it is reasonable */
3790         if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3791                 arc_meta_limit = zfs_arc_meta_limit;
3792 
3793         if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3794                 arc_c_min = arc_meta_limit / 2;
3795 
3796         if (zfs_arc_grow_retry > 0)
3797                 arc_grow_retry = zfs_arc_grow_retry;
3798 
3799         if (zfs_arc_shrink_shift > 0)
3800                 arc_shrink_shift = zfs_arc_shrink_shift;
3801 
3802         if (zfs_arc_p_min_shift > 0)
3803                 arc_p_min_shift = zfs_arc_p_min_shift;
3804 
3805         /* if kmem_flags are set, let's try to use less memory */
3806         if (kmem_debugging())
3807                 arc_c = arc_c / 2;
3808         if (arc_c < arc_c_min)
3809                 arc_c = arc_c_min;
3810 
3811         arc_anon = &ARC_anon;
3812         arc_mru = &ARC_mru;
3813         arc_mru_ghost = &ARC_mru_ghost;
3814         arc_mfu = &ARC_mfu;
3815         arc_mfu_ghost = &ARC_mfu_ghost;
3816         arc_l2c_only = &ARC_l2c_only;
3817         arc_size = 0;
3818 
3819         mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3820         mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3821         mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3822         mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3823         mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3824         mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3825 
3826         list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3827             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3828         list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3829             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3830         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3831             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3832         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3833             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3834         list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3835             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3836         list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3837             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3838         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3839             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3840         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3841             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3842         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3843             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3844         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3845             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3846 
3847         buf_init();
3848 
3849         arc_thread_exit = 0;
3850         arc_eviction_list = NULL;
3851         mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3852         bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3853 
3854         arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3855             sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3856 
3857         if (arc_ksp != NULL) {
3858                 arc_ksp->ks_data = &arc_stats;
3859                 kstat_install(arc_ksp);
3860         }
3861 
3862         (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3863             TS_RUN, minclsyspri);
3864 
3865         arc_dead = FALSE;
3866         arc_warm = B_FALSE;
3867 
3868         /*
3869          * Calculate maximum amount of dirty data per pool.
3870          *
3871          * If it has been set by /etc/system, take that.
3872          * Otherwise, use a percentage of physical memory defined by
3873          * zfs_dirty_data_max_percent (default 10%) with a cap at
3874          * zfs_dirty_data_max_max (default 4GB).
3875          */
3876         if (zfs_dirty_data_max == 0) {
3877                 zfs_dirty_data_max = physmem * PAGESIZE *
3878                     zfs_dirty_data_max_percent / 100;
3879                 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
3880                     zfs_dirty_data_max_max);
3881         }
3882 }
3883 
3884 void
3885 arc_fini(void)
3886 {
3887         mutex_enter(&arc_reclaim_thr_lock);
3888         arc_thread_exit = 1;
3889         while (arc_thread_exit != 0)
3890                 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3891         mutex_exit(&arc_reclaim_thr_lock);
3892 
3893         arc_flush(NULL);
3894 
3895         arc_dead = TRUE;
3896 
3897         if (arc_ksp != NULL) {
3898                 kstat_delete(arc_ksp);
3899                 arc_ksp = NULL;
3900         }
3901 
3902         mutex_destroy(&arc_eviction_mtx);
3903         mutex_destroy(&arc_reclaim_thr_lock);
3904         cv_destroy(&arc_reclaim_thr_cv);
3905 
3906         list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3907         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3908         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3909         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3910         list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3911         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3912         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3913         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3914 
3915         mutex_destroy(&arc_anon->arcs_mtx);
3916         mutex_destroy(&arc_mru->arcs_mtx);
3917         mutex_destroy(&arc_mru_ghost->arcs_mtx);
3918         mutex_destroy(&arc_mfu->arcs_mtx);
3919         mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3920         mutex_destroy(&arc_l2c_only->arcs_mtx);
3921 
3922         buf_fini();
3923 
3924         ASSERT(arc_loaned_bytes == 0);
3925 }
3926 
3927 /*
3928  * Level 2 ARC
3929  *
3930  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3931  * It uses dedicated storage devices to hold cached data, which are populated
3932  * using large infrequent writes.  The main role of this cache is to boost
3933  * the performance of random read workloads.  The intended L2ARC devices
3934  * include short-stroked disks, solid state disks, and other media with
3935  * substantially lower read latency than disk.
3936  *
3937  *                 +-----------------------+
3938  *                 |         ARC           |
3939  *                 +-----------------------+
3940  *                    |         ^     ^
3941  *                    |         |     |
3942  *      l2arc_feed_thread()    arc_read()
3943  *                    |         |     |
3944  *                    |  l2arc read   |
3945  *                    V         |     |
3946  *               +---------------+    |
3947  *               |     L2ARC     |    |
3948  *               +---------------+    |
3949  *                   |    ^           |
3950  *          l2arc_write() |           |
3951  *                   |    |           |
3952  *                   V    |           |
3953  *                 +-------+      +-------+
3954  *                 | vdev  |      | vdev  |
3955  *                 | cache |      | cache |
3956  *                 +-------+      +-------+
3957  *                 +=========+     .-----.
3958  *                 :  L2ARC  :    |-_____-|
3959  *                 : devices :    | Disks |
3960  *                 +=========+    `-_____-'
3961  *
3962  * Read requests are satisfied from the following sources, in order:
3963  *
3964  *      1) ARC
3965  *      2) vdev cache of L2ARC devices
3966  *      3) L2ARC devices
3967  *      4) vdev cache of disks
3968  *      5) disks
3969  *
3970  * Some L2ARC device types exhibit extremely slow write performance.
3971  * To accommodate this, there are some significant differences between
3972  * the L2ARC and traditional cache design:
3973  *
3974  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
3975  * the ARC behave as usual, freeing buffers and placing headers on ghost
3976  * lists.  The ARC does not send buffers to the L2ARC during eviction as
3977  * this would add inflated write latencies for all ARC memory pressure.
3978  *
3979  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3980  * It does this by periodically scanning buffers from the eviction-end of
3981  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3982  * not already there. It scans until a headroom of buffers is satisfied,
3983  * which itself is a buffer for ARC eviction. If a compressible buffer is
3984  * found during scanning and selected for writing to an L2ARC device, we
3985  * temporarily boost scanning headroom during the next scan cycle to make
3986  * sure we adapt to compression effects (which might significantly reduce
3987  * the data volume we write to L2ARC). The thread that does this is
3988  * l2arc_feed_thread(), illustrated below; example sizes are included to
3989  * provide a better sense of ratio than the diagram alone can convey:
3990  *
3991  *             head -->                        tail
3992  *              +---------------------+----------+
3993  *      ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
3994  *              +---------------------+----------+   |   o L2ARC eligible
3995  *      ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
3996  *              +---------------------+----------+   |
3997  *                   15.9 Gbytes      ^ 32 Mbytes    |
3998  *                                 headroom          |
3999  *                                            l2arc_feed_thread()
4000  *                                                   |
4001  *                       l2arc write hand <--[oooo]--'
4002  *                               |           8 Mbyte
4003  *                               |          write max
4004  *                               V
4005  *                +==============================+
4006  *      L2ARC dev |####|#|###|###|    |####| ... |
4007  *                +==============================+
4008  *                           32 Gbytes
4009  *
4010  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
4011  * evicted, then the L2ARC has cached a buffer much sooner than it probably
4012  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
4013  * safe to say that this is an uncommon case, since buffers at the end of
4014  * the ARC lists have moved there due to inactivity.
4015  *
4016  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
4017  * then the L2ARC simply misses copying some buffers.  This serves as a
4018  * pressure valve to prevent heavy read workloads from both stalling the ARC
4019  * with waits and clogging the L2ARC with writes.  This also helps prevent
4020  * the potential for the L2ARC to churn if it attempts to cache content too
4021  * quickly, such as during backups of the entire pool.
4022  *
4023  * 5. After system boot and before the ARC has filled main memory, there are
4024  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4025  * lists can remain mostly static.  Instead of searching from the tail of these
4026  * lists as pictured, the l2arc_feed_thread() will search from the list heads
4027  * for eligible buffers, greatly increasing its chance of finding them.
4028  *
4029  * The L2ARC device write speed is also boosted during this time so that
4030  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
4031  * there are no L2ARC reads, and no fear of degrading read performance
4032  * through increased writes.
4033  *
4034  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4035  * the vdev queue can aggregate them into larger and fewer writes.  Each
4036  * device is written to in a rotor fashion, sweeping writes through
4037  * available space then repeating.
4038  *
4039  * 7. The L2ARC does not store dirty content.  It never needs to flush
4040  * write buffers back to disk based storage.
4041  *
4042  * 8. If an ARC buffer is written (and dirtied) which also exists in the
4043  * L2ARC, the now stale L2ARC buffer is immediately dropped.
4044  *
4045  * The performance of the L2ARC can be tweaked by a number of tunables, which
4046  * may be necessary for different workloads:
4047  *
4048  *      l2arc_write_max         max write bytes per interval
4049  *      l2arc_write_boost       extra write bytes during device warmup
4050  *      l2arc_noprefetch        skip caching prefetched buffers
4051  *      l2arc_headroom          number of max device writes to precache
4052  *      l2arc_headroom_boost    when we find compressed buffers during ARC
4053  *                              scanning, we multiply headroom by this
4054  *                              percentage factor for the next scan cycle,
4055  *                              since more compressed buffers are likely to
4056  *                              be present
4057  *      l2arc_feed_secs         seconds between L2ARC writing
4058  *
4059  * Tunables may be removed or added as future performance improvements are
4060  * integrated, and also may become zpool properties.
4061  *
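 * As a rough worked example (the values below are illustrative assumptions,
 * not necessarily the compiled-in defaults), suppose l2arc_write_max is
 * 8 Mbytes, l2arc_write_boost is 8 Mbytes, l2arc_headroom is 2 and
 * l2arc_headroom_boost is 200.  A single feed cycle then behaves roughly as:
 *
 *      size = l2arc_write_max;                          8 Mbytes
 *      if (arc_warm == B_FALSE)
 *              size += l2arc_write_boost;               16 Mbytes while cold
 *      headroom = size * l2arc_headroom;        up to 32 Mbytes per ARC list
 *      if (compressed buffers were seen last cycle)
 *              headroom = (headroom * l2arc_headroom_boost) / 100;
 *
 * See l2arc_write_size() and l2arc_write_buffers() for the actual logic.
 *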
4062  * There are three key functions that control how the L2ARC warms up:
4063  *
4064  *      l2arc_write_eligible()  check if a buffer is eligible to cache
4065  *      l2arc_write_size()      calculate how much to write
4066  *      l2arc_write_interval()  calculate sleep delay between writes
4067  *
4068  * These three functions determine what to write, how much, and how quickly
4069  * to send writes.
4070  */
4071 
4072 static boolean_t
4073 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4074 {
4075         /*
4076          * A buffer is *not* eligible for the L2ARC if it:
4077          * 1. belongs to a different spa.
4078          * 2. is already cached on the L2ARC.
4079          * 3. has an I/O in progress (it may be an incomplete read).
4080          * 4. is flagged not eligible (zfs property).
4081          */
4082         if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
4083             HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
4084                 return (B_FALSE);
4085 
4086         return (B_TRUE);
4087 }
4088 
4089 static uint64_t
4090 l2arc_write_size(void)
4091 {
4092         uint64_t size;
4093 
4094         /*
4095          * Make sure our globals have meaningful values in case the user
4096          * altered them.
4097          */
4098         size = l2arc_write_max;
4099         if (size == 0) {
4100                 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4101                     "be greater than zero, resetting it to the default (%d)",
4102                     L2ARC_WRITE_SIZE);
4103                 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4104         }
4105 
4106         if (arc_warm == B_FALSE)
4107                 size += l2arc_write_boost;
4108 
4109         return (size);
4111 }
4112 
4113 static clock_t
4114 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4115 {
4116         clock_t interval, next, now;
4117 
4118         /*
4119          * If the ARC lists are busy, increase our write rate; if the
4120          * lists are stale, idle back.  This is achieved by checking
4121          * how much we previously wrote - if it was more than half of
4122          * what we wanted, schedule the next write much sooner.
4123          */
4124         if (l2arc_feed_again && wrote > (wanted / 2))
4125                 interval = (hz * l2arc_feed_min_ms) / 1000;
4126         else
4127                 interval = hz * l2arc_feed_secs;
4128 
4129         now = ddi_get_lbolt();
4130         next = MAX(now, MIN(now + interval, began + interval));
4131 
4132         return (next);
4133 }
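
/*
 * Worked example for the interval logic above (illustrative values, not
 * necessarily the compiled-in defaults): with hz = 100, l2arc_feed_secs = 1
 * and l2arc_feed_min_ms = 200, a cycle that wrote more than half of what it
 * wanted (and with l2arc_feed_again set) schedules the next feed
 * (100 * 200) / 1000 = 20 ticks after it began, while a cycle that wrote
 * little waits the full 100 ticks.  The MAX/MIN clamp keeps the next wakeup
 * from landing in the past and, when possible, no later than one interval
 * after 'began'.
 */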
4134 
4135 static void
4136 l2arc_hdr_stat_add(void)
4137 {
4138         ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4139         ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4140 }
4141 
4142 static void
4143 l2arc_hdr_stat_remove(void)
4144 {
4145         ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4146         ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4147 }
4148 
4149 /*
4150  * Cycle through L2ARC devices.  This is how L2ARC load balances.
4151  * If a device is returned, this also returns holding the spa config lock.
4152  */
4153 static l2arc_dev_t *
4154 l2arc_dev_get_next(void)
4155 {
4156         l2arc_dev_t *first, *next = NULL;
4157 
4158         /*
4159          * Lock out the removal of spas (spa_namespace_lock), then removal
4160          * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
4161          * both locks will be dropped and a spa config lock held instead.
4162          */
4163         mutex_enter(&spa_namespace_lock);
4164         mutex_enter(&l2arc_dev_mtx);
4165 
4166         /* if there are no vdevs, there is nothing to do */
4167         if (l2arc_ndev == 0)
4168                 goto out;
4169 
4170         first = NULL;
4171         next = l2arc_dev_last;
4172         do {
4173                 /* loop around the list looking for a non-faulted vdev */
4174                 if (next == NULL) {
4175                         next = list_head(l2arc_dev_list);
4176                 } else {
4177                         next = list_next(l2arc_dev_list, next);
4178                         if (next == NULL)
4179                                 next = list_head(l2arc_dev_list);
4180                 }
4181 
4182                 /* if we have come back to the start, bail out */
4183                 if (first == NULL)
4184                         first = next;
4185                 else if (next == first)
4186                         break;
4187 
4188         } while (vdev_is_dead(next->l2ad_vdev));
4189 
4190         /* if we were unable to find any usable vdevs, return NULL */
4191         if (vdev_is_dead(next->l2ad_vdev))
4192                 next = NULL;
4193 
4194         l2arc_dev_last = next;
4195 
4196 out:
4197         mutex_exit(&l2arc_dev_mtx);
4198 
4199         /*
4200          * Grab the config lock to prevent the 'next' device from being
4201          * removed while we are writing to it.
4202          */
4203         if (next != NULL)
4204                 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4205         mutex_exit(&spa_namespace_lock);
4206 
4207         return (next);
4208 }
4209 
4210 /*
4211  * Free buffers that were tagged for destruction.
4212  */
4213 static void
4214 l2arc_do_free_on_write(void)
4215 {
4216         list_t *buflist;
4217         l2arc_data_free_t *df, *df_prev;
4218 
4219         mutex_enter(&l2arc_free_on_write_mtx);
4220         buflist = l2arc_free_on_write;
4221 
4222         for (df = list_tail(buflist); df; df = df_prev) {
4223                 df_prev = list_prev(buflist, df);
4224                 ASSERT(df->l2df_data != NULL);
4225                 ASSERT(df->l2df_func != NULL);
4226                 df->l2df_func(df->l2df_data, df->l2df_size);
4227                 list_remove(buflist, df);
4228                 kmem_free(df, sizeof (l2arc_data_free_t));
4229         }
4230 
4231         mutex_exit(&l2arc_free_on_write_mtx);
4232 }
4233 
4234 /*
4235  * A write to a cache device has completed.  Update all headers to allow
4236  * reads from these buffers to begin.
4237  */
4238 static void
4239 l2arc_write_done(zio_t *zio)
4240 {
4241         l2arc_write_callback_t *cb;
4242         l2arc_dev_t *dev;
4243         list_t *buflist;
4244         arc_buf_hdr_t *head, *ab, *ab_prev;
4245         l2arc_buf_hdr_t *abl2;
4246         kmutex_t *hash_lock;
4247 
4248         cb = zio->io_private;
4249         ASSERT(cb != NULL);
4250         dev = cb->l2wcb_dev;
4251         ASSERT(dev != NULL);
4252         head = cb->l2wcb_head;
4253         ASSERT(head != NULL);
4254         buflist = dev->l2ad_buflist;
4255         ASSERT(buflist != NULL);
4256         DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4257             l2arc_write_callback_t *, cb);
4258 
4259         if (zio->io_error != 0)
4260                 ARCSTAT_BUMP(arcstat_l2_writes_error);
4261 
4262         mutex_enter(&l2arc_buflist_mtx);
4263 
4264         /*
4265          * All writes completed, or an error was hit.
4266          */
4267         for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4268                 ab_prev = list_prev(buflist, ab);
4269 
4270                 hash_lock = HDR_LOCK(ab);
4271                 if (!mutex_tryenter(hash_lock)) {
4272                         /*
4273                          * This buffer misses out.  It may be in a stage
4274                          * of eviction.  Its ARC_L2_WRITING flag will be
4275                          * left set, denying reads to this buffer.
4276                          */
4277                         ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4278                         continue;
4279                 }
4280 
4281                 abl2 = ab->b_l2hdr;
4282 
4283                 /*
4284                  * Release the temporary compressed buffer as soon as possible.
4285                  */
4286                 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4287                         l2arc_release_cdata_buf(ab);
4288 
4289                 if (zio->io_error != 0) {
4290                         /*
4291                          * Error - drop L2ARC entry.
4292                          */
4293                         list_remove(buflist, ab);
4294                         ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4295                         ab->b_l2hdr = NULL;
4296                         kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4297                         ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4298                 }
4299 
4300                 /*
4301                  * Allow ARC to begin reads to this L2ARC entry.
4302                  */
4303                 ab->b_flags &= ~ARC_L2_WRITING;
4304 
4305                 mutex_exit(hash_lock);
4306         }
4307 
4308         atomic_inc_64(&l2arc_writes_done);
4309         list_remove(buflist, head);
4310         kmem_cache_free(hdr_cache, head);
4311         mutex_exit(&l2arc_buflist_mtx);
4312 
4313         l2arc_do_free_on_write();
4314 
4315         kmem_free(cb, sizeof (l2arc_write_callback_t));
4316 }
4317 
4318 /*
4319  * A read to a cache device completed.  Validate buffer contents before
4320  * handing over to the regular ARC routines.
4321  */
4322 static void
4323 l2arc_read_done(zio_t *zio)
4324 {
4325         l2arc_read_callback_t *cb;
4326         arc_buf_hdr_t *hdr;
4327         arc_buf_t *buf;
4328         kmutex_t *hash_lock;
4329         int equal;
4330 
4331         ASSERT(zio->io_vd != NULL);
4332         ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4333 
4334         spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4335 
4336         cb = zio->io_private;
4337         ASSERT(cb != NULL);
4338         buf = cb->l2rcb_buf;
4339         ASSERT(buf != NULL);
4340 
4341         hash_lock = HDR_LOCK(buf->b_hdr);
4342         mutex_enter(hash_lock);
4343         hdr = buf->b_hdr;
4344         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4345 
4346         /*
4347          * If the buffer was compressed, decompress it first.
4348          */
4349         if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4350                 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4351         ASSERT(zio->io_data != NULL);
4352 
4353         /*
4354          * Check this survived the L2ARC journey.
4355          */
4356         equal = arc_cksum_equal(buf);
4357         if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4358                 mutex_exit(hash_lock);
4359                 zio->io_private = buf;
4360                 zio->io_bp_copy = cb->l2rcb_bp;   /* XXX fix in L2ARC 2.0 */
4361                 zio->io_bp = &zio->io_bp_copy;        /* XXX fix in L2ARC 2.0 */
4362                 arc_read_done(zio);
4363         } else {
4364                 mutex_exit(hash_lock);
4365                 /*
4366                  * Buffer didn't survive caching.  Increment stats and
4367                  * reissue to the original storage device.
4368                  */
4369                 if (zio->io_error != 0) {
4370                         ARCSTAT_BUMP(arcstat_l2_io_error);
4371                 } else {
4372                         zio->io_error = SET_ERROR(EIO);
4373                 }
4374                 if (!equal)
4375                         ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4376 
4377                 /*
4378                  * If there's no waiter, issue an async i/o to the primary
4379                  * storage now.  If there *is* a waiter, the caller must
4380                  * issue the i/o in a context where it's OK to block.
4381                  */
4382                 if (zio->io_waiter == NULL) {
4383                         zio_t *pio = zio_unique_parent(zio);
4384 
4385                         ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4386 
4387                         zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4388                             buf->b_data, zio->io_size, arc_read_done, buf,
4389                             zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4390                 }
4391         }
4392 
4393         kmem_free(cb, sizeof (l2arc_read_callback_t));
4394 }
4395 
4396 /*
4397  * This is the list priority from which the L2ARC will search for pages to
4398  * cache.  This is used within loops (0..3) to cycle through lists in the
4399  * desired order.  This order can have a significant effect on cache
4400  * performance.
4401  *
4402  * Currently the metadata lists are hit first, MFU then MRU, followed by
4403  * the data lists.  This function returns a locked list, and also returns
4404  * the lock pointer.
4405  */
4406 static list_t *
4407 l2arc_list_locked(int list_num, kmutex_t **lock)
4408 {
4409         list_t *list = NULL;
4410 
4411         ASSERT(list_num >= 0 && list_num <= 3);
4412 
4413         switch (list_num) {
4414         case 0:
4415                 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4416                 *lock = &arc_mfu->arcs_mtx;
4417                 break;
4418         case 1:
4419                 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4420                 *lock = &arc_mru->arcs_mtx;
4421                 break;
4422         case 2:
4423                 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4424                 *lock = &arc_mfu->arcs_mtx;
4425                 break;
4426         case 3:
4427                 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4428                 *lock = &arc_mru->arcs_mtx;
4429                 break;
4430         }
4431 
4432         ASSERT(!(MUTEX_HELD(*lock)));
4433         mutex_enter(*lock);
4434         return (list);
4435 }
4436 
4437 /*
4438  * Evict buffers from the device write hand to the distance specified in
4439  * bytes.  This distance may span populated buffers, or it may span nothing.
4440  * This clears a region of the L2ARC device, making it ready for writing.
4441  * If the 'all' boolean is set, every buffer is evicted.
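 *
 * For example (illustrative numbers), if the write hand sits 16 Mbytes from
 * l2ad_end and the requested distance is 16 Mbytes, the hand is within
 * 2 * distance of the end of the device, so everything from the hand to
 * l2ad_end is evicted and the write hand will subsequently wrap back to
 * l2ad_start.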
4442  */
4443 static void
4444 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4445 {
4446         list_t *buflist;
4447         l2arc_buf_hdr_t *abl2;
4448         arc_buf_hdr_t *ab, *ab_prev;
4449         kmutex_t *hash_lock;
4450         uint64_t taddr;
4451 
4452         buflist = dev->l2ad_buflist;
4453 
4454         if (buflist == NULL)
4455                 return;
4456 
4457         if (!all && dev->l2ad_first) {
4458                 /*
4459                  * This is the first sweep through the device.  There is
4460                  * nothing to evict.
4461                  */
4462                 return;
4463         }
4464 
4465         if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4466                 /*
4467                  * When nearing the end of the device, evict to the end
4468                  * before the device write hand jumps to the start.
4469                  */
4470                 taddr = dev->l2ad_end;
4471         } else {
4472                 taddr = dev->l2ad_hand + distance;
4473         }
4474         DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4475             uint64_t, taddr, boolean_t, all);
4476 
4477 top:
4478         mutex_enter(&l2arc_buflist_mtx);
4479         for (ab = list_tail(buflist); ab; ab = ab_prev) {
4480                 ab_prev = list_prev(buflist, ab);
4481 
4482                 hash_lock = HDR_LOCK(ab);
4483                 if (!mutex_tryenter(hash_lock)) {
4484                         /*
4485                          * Missed the hash lock.  Retry.
4486                          */
4487                         ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4488                         mutex_exit(&l2arc_buflist_mtx);
4489                         mutex_enter(hash_lock);
4490                         mutex_exit(hash_lock);
4491                         goto top;
4492                 }
4493 
4494                 if (HDR_L2_WRITE_HEAD(ab)) {
4495                         /*
4496                          * We hit a write head node.  Leave it for
4497                          * l2arc_write_done().
4498                          */
4499                         list_remove(buflist, ab);
4500                         mutex_exit(hash_lock);
4501                         continue;
4502                 }
4503 
4504                 if (!all && ab->b_l2hdr != NULL &&
4505                     (ab->b_l2hdr->b_daddr > taddr ||
4506                     ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4507                         /*
4508                          * We've evicted to the target address,
4509                          * or the end of the device.
4510                          */
4511                         mutex_exit(hash_lock);
4512                         break;
4513                 }
4514 
4515                 if (HDR_FREE_IN_PROGRESS(ab)) {
4516                         /*
4517                          * Already on the path to destruction.
4518                          */
4519                         mutex_exit(hash_lock);
4520                         continue;
4521                 }
4522 
4523                 if (ab->b_state == arc_l2c_only) {
4524                         ASSERT(!HDR_L2_READING(ab));
4525                         /*
4526                          * This doesn't exist in the ARC.  Destroy.
4527                          * arc_hdr_destroy() will call list_remove()
4528                          * and decrement arcstat_l2_size.
4529                          */
4530                         arc_change_state(arc_anon, ab, hash_lock);
4531                         arc_hdr_destroy(ab);
4532                 } else {
4533                         /*
4534                          * Invalidate issued or about to be issued
4535                          * reads, since we may be about to write
4536                          * over this location.
4537                          */
4538                         if (HDR_L2_READING(ab)) {
4539                                 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4540                                 ab->b_flags |= ARC_L2_EVICTED;
4541                         }
4542 
4543                         /*
4544                          * Tell ARC this no longer exists in L2ARC.
4545                          */
4546                         if (ab->b_l2hdr != NULL) {
4547                                 abl2 = ab->b_l2hdr;
4548                                 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4549                                 ab->b_l2hdr = NULL;
4550                                 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4551                                 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4552                         }
4553                         list_remove(buflist, ab);
4554 
4555                         /*
4556                          * This may have been left over after a
4557                          * failed write.
4558                          */
4559                         ab->b_flags &= ~ARC_L2_WRITING;
4560                 }
4561                 mutex_exit(hash_lock);
4562         }
4563         mutex_exit(&l2arc_buflist_mtx);
4564 
4565         vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4566         dev->l2ad_evict = taddr;
4567 }
4568 
4569 /*
4570  * Find and write ARC buffers to the L2ARC device.
4571  *
4572  * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4573  * for reading until they have completed writing.
4574  * The headroom_boost is an in-out parameter used to maintain headroom boost
4575  * state between calls to this function.
4576  *
4577  * Returns the number of bytes actually written (which may be smaller than
4578  * the delta by which the device hand has changed due to alignment).
4579  */
4580 static uint64_t
4581 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4582     boolean_t *headroom_boost)
4583 {
4584         arc_buf_hdr_t *ab, *ab_prev, *head;
4585         list_t *list;
4586         uint64_t write_asize, write_psize, write_sz, headroom,
4587             buf_compress_minsz;
4588         void *buf_data;
4589         kmutex_t *list_lock;
4590         boolean_t full;
4591         l2arc_write_callback_t *cb;
4592         zio_t *pio, *wzio;
4593         uint64_t guid = spa_load_guid(spa);
4594         const boolean_t do_headroom_boost = *headroom_boost;
4595 
4596         ASSERT(dev->l2ad_vdev != NULL);
4597 
4598         /* Lower the flag now, we might want to raise it again later. */
4599         *headroom_boost = B_FALSE;
4600 
4601         pio = NULL;
4602         write_sz = write_asize = write_psize = 0;
4603         full = B_FALSE;
4604         head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4605         head->b_flags |= ARC_L2_WRITE_HEAD;
4606 
4607         /*
4608          * We will want to try to compress buffers that are at least 2x the
4609          * device sector size.
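         * For example, a device with 512-byte sectors (vdev_ashift of 9)
         * yields a threshold of 2 << 9 = 1024 bytes, while a 4K-sector
         * device (vdev_ashift of 12) yields 8192 bytes; smaller buffers
         * skip the compression attempt and are written out as-is.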
4610          */
4611         buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4612 
4613         /*
4614          * Copy buffers for L2ARC writing.
4615          */
4616         mutex_enter(&l2arc_buflist_mtx);
4617         for (int try = 0; try <= 3; try++) {
4618                 uint64_t passed_sz = 0;
4619 
4620                 list = l2arc_list_locked(try, &list_lock);
4621 
4622                 /*
4623                  * L2ARC fast warmup.
4624                  *
4625                  * Until the ARC is warm and starts to evict, read from the
4626                  * head of the ARC lists rather than the tail.
4627                  */
4628                 if (arc_warm == B_FALSE)
4629                         ab = list_head(list);
4630                 else
4631                         ab = list_tail(list);
4632 
4633                 headroom = target_sz * l2arc_headroom;
4634                 if (do_headroom_boost)
4635                         headroom = (headroom * l2arc_headroom_boost) / 100;
4636 
4637                 for (; ab; ab = ab_prev) {
4638                         l2arc_buf_hdr_t *l2hdr;
4639                         kmutex_t *hash_lock;
4640                         uint64_t buf_sz;
4641 
4642                         if (arc_warm == B_FALSE)
4643                                 ab_prev = list_next(list, ab);
4644                         else
4645                                 ab_prev = list_prev(list, ab);
4646 
4647                         hash_lock = HDR_LOCK(ab);
4648                         if (!mutex_tryenter(hash_lock)) {
4649                                 /*
4650                                  * Skip this buffer rather than waiting.
4651                                  */
4652                                 continue;
4653                         }
4654 
4655                         passed_sz += ab->b_size;
4656                         if (passed_sz > headroom) {
4657                                 /*
4658                                  * Searched too far.
4659                                  */
4660                                 mutex_exit(hash_lock);
4661                                 break;
4662                         }
4663 
4664                         if (!l2arc_write_eligible(guid, ab)) {
4665                                 mutex_exit(hash_lock);
4666                                 continue;
4667                         }
4668 
4669                         if ((write_sz + ab->b_size) > target_sz) {
4670                                 full = B_TRUE;
4671                                 mutex_exit(hash_lock);
4672                                 break;
4673                         }
4674 
4675                         if (pio == NULL) {
4676                                 /*
4677                                  * Insert a dummy header on the buflist so
4678                                  * l2arc_write_done() can find where the
4679                                  * write buffers begin without searching.
4680                                  */
4681                                 list_insert_head(dev->l2ad_buflist, head);
4682 
4683                                 cb = kmem_alloc(
4684                                     sizeof (l2arc_write_callback_t), KM_SLEEP);
4685                                 cb->l2wcb_dev = dev;
4686                                 cb->l2wcb_head = head;
4687                                 pio = zio_root(spa, l2arc_write_done, cb,
4688                                     ZIO_FLAG_CANFAIL);
4689                         }
4690 
4691                         /*
4692                          * Create and add a new L2ARC header.
4693                          */
4694                         l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4695                         l2hdr->b_dev = dev;
4696                         ab->b_flags |= ARC_L2_WRITING;
4697 
4698                         /*
4699                          * Temporarily stash the data buffer in b_tmp_cdata.
4700                          * The subsequent write step will pick it up from
4701                          * there. This is because we can't access ab->b_buf
4702                          * without holding the hash_lock, which we in turn
4703                          * can't access without holding the ARC list locks
4704                          * (which we want to avoid during compression/writing).
4705                          */
4706                         l2hdr->b_compress = ZIO_COMPRESS_OFF;
4707                         l2hdr->b_asize = ab->b_size;
4708                         l2hdr->b_tmp_cdata = ab->b_buf->b_data;
4709 
4710                         buf_sz = ab->b_size;
4711                         ab->b_l2hdr = l2hdr;
4712 
4713                         list_insert_head(dev->l2ad_buflist, ab);
4714 
4715                         /*
4716                          * Compute and store the buffer cksum before writing.
4717                          * In DEBUG builds the cksum is verified first.
4718                          */
4719                         arc_cksum_verify(ab->b_buf);
4720                         arc_cksum_compute(ab->b_buf, B_TRUE);
4721 
4722                         mutex_exit(hash_lock);
4723 
4724                         write_sz += buf_sz;
4725                 }
4726 
4727                 mutex_exit(list_lock);
4728 
4729                 if (full == B_TRUE)
4730                         break;
4731         }
4732 
4733         /* No buffers selected for writing? */
4734         if (pio == NULL) {
4735                 ASSERT0(write_sz);
4736                 mutex_exit(&l2arc_buflist_mtx);
4737                 kmem_cache_free(hdr_cache, head);
4738                 return (0);
4739         }
4740 
4741         /*
4742          * Now start writing the buffers. We start at the write head
4743          * and work backwards, retracing the course of the buffer selector
4744          * loop above.
4745          */
4746         for (ab = list_prev(dev->l2ad_buflist, head); ab;
4747             ab = list_prev(dev->l2ad_buflist, ab)) {
4748                 l2arc_buf_hdr_t *l2hdr;
4749                 uint64_t buf_sz;
4750 
4751                 /*
4752                  * We shouldn't need to lock the buffer here, since we flagged
4753                  * it as ARC_L2_WRITING in the previous step, but we must take
4754                  * care to only access its L2 cache parameters. In particular,
4755                  * ab->b_buf may be invalid by now due to ARC eviction.
4756                  */
4757                 l2hdr = ab->b_l2hdr;
4758                 l2hdr->b_daddr = dev->l2ad_hand;
4759 
4760                 if ((ab->b_flags & ARC_L2COMPRESS) &&
4761                     l2hdr->b_asize >= buf_compress_minsz) {
4762                         if (l2arc_compress_buf(l2hdr)) {
4763                                 /*
4764                                  * If compression succeeded, enable headroom
4765                                  * boost on the next scan cycle.
4766                                  */
4767                                 *headroom_boost = B_TRUE;
4768                         }
4769                 }
4770 
4771                 /*
4772                  * Pick up the buffer data we had previously stashed away
4773                  * (and now potentially also compressed).
4774                  */
4775                 buf_data = l2hdr->b_tmp_cdata;
4776                 buf_sz = l2hdr->b_asize;
4777 
4778                 /* Compression may have squashed the buffer to zero length. */
4779                 if (buf_sz != 0) {
4780                         uint64_t buf_p_sz;
4781 
4782                         wzio = zio_write_phys(pio, dev->l2ad_vdev,
4783                             dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4784                             NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4785                             ZIO_FLAG_CANFAIL, B_FALSE);
4786 
4787                         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4788                             zio_t *, wzio);
4789                         (void) zio_nowait(wzio);
4790 
4791                         write_asize += buf_sz;
4792                         /*
4793                          * Keep the clock hand suitably device-aligned.
4794                          */
4795                         buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4796                         write_psize += buf_p_sz;
4797                         dev->l2ad_hand += buf_p_sz;
4798                 }
4799         }
4800 
4801         mutex_exit(&l2arc_buflist_mtx);
4802 
4803         ASSERT3U(write_asize, <=, target_sz);
4804         ARCSTAT_BUMP(arcstat_l2_writes_sent);
4805         ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
4806         ARCSTAT_INCR(arcstat_l2_size, write_sz);
4807         ARCSTAT_INCR(arcstat_l2_asize, write_asize);
4808         vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
4809 
4810         /*
4811          * Bump device hand to the device start if it is approaching the end.
4812          * l2arc_evict() will already have evicted ahead for this case.
4813          */
4814         if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4815                 vdev_space_update(dev->l2ad_vdev,
4816                     dev->l2ad_end - dev->l2ad_hand, 0, 0);
4817                 dev->l2ad_hand = dev->l2ad_start;
4818                 dev->l2ad_evict = dev->l2ad_start;
4819                 dev->l2ad_first = B_FALSE;
4820         }
4821 
4822         dev->l2ad_writing = B_TRUE;
4823         (void) zio_wait(pio);
4824         dev->l2ad_writing = B_FALSE;
4825 
4826         return (write_asize);
4827 }
4828 
4829 /*
4830  * Compresses an L2ARC buffer.
4831  * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
4832  * size in l2hdr->b_asize. This routine tries to compress the data and
4833  * depending on the compression result there are three possible outcomes:
4834  * *) The buffer was incompressible. The original l2hdr contents were left
4835  *    untouched and are ready for writing to an L2 device.
4836  * *) The buffer was all-zeros, so there is no need to write it to an L2
4837  *    device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
4838  *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
4839  * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
4840  *    data buffer which holds the compressed data to be written, and b_asize
4841  *    tells us how much data there is. b_compress is set to the appropriate
4842  *    compression algorithm. Once writing is done, invoke
4843  *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
4844  *
4845  * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
4846  * buffer was incompressible).
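 *
 * For example (illustrative numbers), a 128K buffer that LZ4 compresses to
 * 20K comes back with b_compress = ZIO_COMPRESS_LZ4, b_asize = 20K and
 * b_tmp_cdata pointing at a temporary buffer (still sized to the original
 * 128K allocation) holding the compressed bytes; an all-zero buffer comes
 * back with b_compress = ZIO_COMPRESS_EMPTY, b_asize = 0 and
 * b_tmp_cdata = NULL.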
4847  */
4848 static boolean_t
4849 l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
4850 {
4851         void *cdata;
4852         size_t csize, len;
4853 
4854         ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
4855         ASSERT(l2hdr->b_tmp_cdata != NULL);
4856 
4857         len = l2hdr->b_asize;
4858         cdata = zio_data_buf_alloc(len);
4859         csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
4860             cdata, l2hdr->b_asize);
4861 
4862         if (csize == 0) {
4863                 /* zero block, indicate that there's nothing to write */
4864                 zio_data_buf_free(cdata, len);
4865                 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
4866                 l2hdr->b_asize = 0;
4867                 l2hdr->b_tmp_cdata = NULL;
4868                 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
4869                 return (B_TRUE);
4870         } else if (csize > 0 && csize < len) {
4871                 /*
4872                  * Compression succeeded, we'll keep the cdata around for
4873                  * writing and release it afterwards.
4874                  */
4875                 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
4876                 l2hdr->b_asize = csize;
4877                 l2hdr->b_tmp_cdata = cdata;
4878                 ARCSTAT_BUMP(arcstat_l2_compress_successes);
4879                 return (B_TRUE);
4880         } else {
4881                 /*
4882                  * Compression failed, release the compressed buffer.
4883                  * l2hdr will be left unmodified.
4884                  */
4885                 zio_data_buf_free(cdata, len);
4886                 ARCSTAT_BUMP(arcstat_l2_compress_failures);
4887                 return (B_FALSE);
4888         }
4889 }
4890 
4891 /*
4892  * Decompresses a zio read back from an l2arc device. On success, the
4893  * underlying zio's io_data buffer is overwritten by the uncompressed
4894  * version. On decompression error (corrupt compressed stream), the
4895  * zio->io_error value is set to signal an I/O error.
4896  *
4897  * Please note that the compressed data stream is not checksummed, so
4898  * if the underlying device is experiencing data corruption, we may feed
4899  * corrupt data to the decompressor; the decompressor therefore needs to
4900  * be able to handle this situation (LZ4 does).
4901  */
4902 static void
4903 l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
4904 {
4905         ASSERT(L2ARC_IS_VALID_COMPRESS(c));
4906 
4907         if (zio->io_error != 0) {
4908                 /*
4909                  * An I/O error has occurred; just restore the original I/O
4910                  * size in preparation for a main pool read.
4911                  */
4912                 zio->io_orig_size = zio->io_size = hdr->b_size;
4913                 return;
4914         }
4915 
4916         if (c == ZIO_COMPRESS_EMPTY) {
4917                 /*
4918                  * An empty buffer results in a null zio, which means we
4919                  * need to fill its io_data after we're done restoring the
4920                  * buffer's contents.
4921                  */
4922                 ASSERT(hdr->b_buf != NULL);
4923                 bzero(hdr->b_buf->b_data, hdr->b_size);
4924                 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
4925         } else {
4926                 ASSERT(zio->io_data != NULL);
4927                 /*
4928                  * We copy the compressed data from the start of the arc buffer
4929                  * (the zio_read will have pulled in only what we need, the
4930                  * rest is garbage which we will overwrite at decompression)
4931                  * and then decompress back to the ARC data buffer. This way we
4932                  * can minimize copying by simply decompressing back over the
4933                  * original compressed data (rather than decompressing to an
4934                  * aux buffer and then copying back the uncompressed buffer,
4935                  * which is likely to be much larger).
4936                  */
4937                 uint64_t csize;
4938                 void *cdata;
4939 
4940                 csize = zio->io_size;
4941                 cdata = zio_data_buf_alloc(csize);
4942                 bcopy(zio->io_data, cdata, csize);
4943                 if (zio_decompress_data(c, cdata, zio->io_data, csize,
4944                     hdr->b_size) != 0)
4945                         zio->io_error = EIO;
4946                 zio_data_buf_free(cdata, csize);
4947         }
4948 
4949         /* Restore the expected uncompressed IO size. */
4950         zio->io_orig_size = zio->io_size = hdr->b_size;
4951 }
4952 
4953 /*
4954  * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
4955  * This buffer serves as a temporary holder of compressed data while
4956  * the buffer entry is being written to an l2arc device. Once that is
4957  * done, we can dispose of it.
4958  */
4959 static void
4960 l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
4961 {
4962         l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
4963 
4964         if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
4965                 /*
4966                  * If the data was compressed, then we've allocated a
4967                  * temporary buffer for it, so now we need to release it.
4968                  */
4969                 ASSERT(l2hdr->b_tmp_cdata != NULL);
4970                 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
4971         }
4972         l2hdr->b_tmp_cdata = NULL;
4973 }
4974 
4975 /*
4976  * This thread feeds the L2ARC at regular intervals.  This is the beating
4977  * heart of the L2ARC.
4978  */
4979 static void
4980 l2arc_feed_thread(void)
4981 {
4982         callb_cpr_t cpr;
4983         l2arc_dev_t *dev;
4984         spa_t *spa;
4985         uint64_t size, wrote;
4986         clock_t begin, next = ddi_get_lbolt();
4987         boolean_t headroom_boost = B_FALSE;
4988 
4989         CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4990 
4991         mutex_enter(&l2arc_feed_thr_lock);
4992 
4993         while (l2arc_thread_exit == 0) {
4994                 CALLB_CPR_SAFE_BEGIN(&cpr);
4995                 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4996                     next);
4997                 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4998                 next = ddi_get_lbolt() + hz;
4999 
5000                 /*
5001                  * Quick check for L2ARC devices.
5002                  */
5003                 mutex_enter(&l2arc_dev_mtx);
5004                 if (l2arc_ndev == 0) {
5005                         mutex_exit(&l2arc_dev_mtx);
5006                         continue;
5007                 }
5008                 mutex_exit(&l2arc_dev_mtx);
5009                 begin = ddi_get_lbolt();
5010 
5011                 /*
5012                  * This selects the next l2arc device to write to, and in
5013                  * doing so the next spa to feed from: dev->l2ad_spa.   This
5014                  * will return NULL if there are now no l2arc devices or if
5015                  * they are all faulted.
5016                  *
5017                  * If a device is returned, its spa's config lock is also
5018                  * held to prevent device removal.  l2arc_dev_get_next()
5019                  * will grab and release l2arc_dev_mtx.
5020                  */
5021                 if ((dev = l2arc_dev_get_next()) == NULL)
5022                         continue;
5023 
5024                 spa = dev->l2ad_spa;
5025                 ASSERT(spa != NULL);
5026 
5027                 /*
5028                  * If the pool is read-only then force the feed thread to
5029                  * sleep a little longer.
5030                  */
5031                 if (!spa_writeable(spa)) {
5032                         next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5033                         spa_config_exit(spa, SCL_L2ARC, dev);
5034                         continue;
5035                 }
5036 
5037                 /*
5038                  * Avoid contributing to memory pressure.
5039                  */
5040                 if (arc_reclaim_needed()) {
5041                         ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5042                         spa_config_exit(spa, SCL_L2ARC, dev);
5043                         continue;
5044                 }
5045 
5046                 ARCSTAT_BUMP(arcstat_l2_feeds);
5047 
5048                 size = l2arc_write_size();
5049 
5050                 /*
5051                  * Evict L2ARC buffers that will be overwritten.
5052                  */
5053                 l2arc_evict(dev, size, B_FALSE);
5054 
5055                 /*
5056                  * Write ARC buffers.
5057                  */
5058                 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5059 
5060                 /*
5061                  * Calculate interval between writes.
5062                  */
5063                 next = l2arc_write_interval(begin, size, wrote);
5064                 spa_config_exit(spa, SCL_L2ARC, dev);
5065         }
5066 
5067         l2arc_thread_exit = 0;
5068         cv_broadcast(&l2arc_feed_thr_cv);
5069         CALLB_CPR_EXIT(&cpr);               /* drops l2arc_feed_thr_lock */
5070         thread_exit();
5071 }
5072 
5073 boolean_t
5074 l2arc_vdev_present(vdev_t *vd)
5075 {
5076         l2arc_dev_t *dev;
5077 
5078         mutex_enter(&l2arc_dev_mtx);
5079         for (dev = list_head(l2arc_dev_list); dev != NULL;
5080             dev = list_next(l2arc_dev_list, dev)) {
5081                 if (dev->l2ad_vdev == vd)
5082                         break;
5083         }
5084         mutex_exit(&l2arc_dev_mtx);
5085 
5086         return (dev != NULL);
5087 }
5088 
5089 /*
5090  * Add a vdev for use by the L2ARC.  By this point the spa has already
5091  * validated the vdev and opened it.
5092  */
5093 void
5094 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5095 {
5096         l2arc_dev_t *adddev;
5097 
5098         ASSERT(!l2arc_vdev_present(vd));
5099 
5100         /*
5101          * Create a new l2arc device entry.
5102          */
5103         adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5104         adddev->l2ad_spa = spa;
5105         adddev->l2ad_vdev = vd;
5106         adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5107         adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5108         adddev->l2ad_hand = adddev->l2ad_start;
5109         adddev->l2ad_evict = adddev->l2ad_start;
5110         adddev->l2ad_first = B_TRUE;
5111         adddev->l2ad_writing = B_FALSE;
5112 
5113         /*
5114          * This is a list of all ARC buffers that are still valid on the
5115          * device.
5116          */
5117         adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5118         list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5119             offsetof(arc_buf_hdr_t, b_l2node));
5120 
5121         vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5122 
5123         /*
5124          * Add device to global list
5125          */
5126         mutex_enter(&l2arc_dev_mtx);
5127         list_insert_head(l2arc_dev_list, adddev);
5128         atomic_inc_64(&l2arc_ndev);
5129         mutex_exit(&l2arc_dev_mtx);
5130 }
5131 
5132 /*
5133  * Remove a vdev from the L2ARC.
5134  */
5135 void
5136 l2arc_remove_vdev(vdev_t *vd)
5137 {
5138         l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5139 
5140         /*
5141          * Find the device by vdev
5142          */
5143         mutex_enter(&l2arc_dev_mtx);
5144         for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5145                 nextdev = list_next(l2arc_dev_list, dev);
5146                 if (vd == dev->l2ad_vdev) {
5147                         remdev = dev;
5148                         break;
5149                 }
5150         }
5151         ASSERT(remdev != NULL);
5152 
5153         /*
5154          * Remove device from global list
5155          */
5156         list_remove(l2arc_dev_list, remdev);
5157         l2arc_dev_last = NULL;          /* may have been invalidated */
5158         atomic_dec_64(&l2arc_ndev);
5159         mutex_exit(&l2arc_dev_mtx);
5160 
5161         /*
5162          * Clear all buflists and ARC references.  L2ARC device flush.
5163          */
5164         l2arc_evict(remdev, 0, B_TRUE);
5165         list_destroy(remdev->l2ad_buflist);
5166         kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5167         kmem_free(remdev, sizeof (l2arc_dev_t));
5168 }
5169 
5170 void
5171 l2arc_init(void)
5172 {
5173         l2arc_thread_exit = 0;
5174         l2arc_ndev = 0;
5175         l2arc_writes_sent = 0;
5176         l2arc_writes_done = 0;
5177 
5178         mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5179         cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5180         mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5181         mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5182         mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5183 
5184         l2arc_dev_list = &L2ARC_dev_list;
5185         l2arc_free_on_write = &L2ARC_free_on_write;
5186         list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5187             offsetof(l2arc_dev_t, l2ad_node));
5188         list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5189             offsetof(l2arc_data_free_t, l2df_list_node));
5190 }
5191 
5192 void
5193 l2arc_fini(void)
5194 {
5195         /*
5196          * This is called from dmu_fini(), which is called from spa_fini().
5197          * Because of this, we can assume that all l2arc devices have
5198          * already been removed when the pools themselves were removed.
5199          */
5200 
5201         l2arc_do_free_on_write();
5202 
5203         mutex_destroy(&l2arc_feed_thr_lock);
5204         cv_destroy(&l2arc_feed_thr_cv);
5205         mutex_destroy(&l2arc_dev_mtx);
5206         mutex_destroy(&l2arc_buflist_mtx);
5207         mutex_destroy(&l2arc_free_on_write_mtx);
5208 
5209         list_destroy(l2arc_dev_list);
5210         list_destroy(l2arc_free_on_write);
5211 }
5212 
5213 void
5214 l2arc_start(void)
5215 {
5216         if (!(spa_mode_global & FWRITE))
5217                 return;
5218 
5219         (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5220             TS_RUN, minclsyspri);
5221 }
5222 
5223 void
5224 l2arc_stop(void)
5225 {
5226         if (!(spa_mode_global & FWRITE))
5227                 return;
5228 
5229         mutex_enter(&l2arc_feed_thr_lock);
5230         cv_signal(&l2arc_feed_thr_cv);      /* kick thread out of startup */
5231         l2arc_thread_exit = 1;
5232         while (l2arc_thread_exit != 0)
5233                 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5234         mutex_exit(&l2arc_feed_thr_lock);
5235 }