1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  24  * Copyright (c) 2013 by Delphix. All rights reserved.
  25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  26  */
  27 
  28 /*
  29  * DVA-based Adjustable Replacement Cache
  30  *
  31  * While much of the theory of operation used here is
  32  * based on the self-tuning, low overhead replacement cache
  33  * presented by Megiddo and Modha at FAST 2003, there are some
  34  * significant differences:
  35  *
  36  * 1. The Megiddo and Modha model assumes any page is evictable.
  37  * Pages in its cache cannot be "locked" into memory.  This makes
  38  * the eviction algorithm simple: evict the last page in the list.
  39  * This also makes the performance characteristics easy to reason
  40  * about.  Our cache is not so simple.  At any given moment, some
  41  * subset of the blocks in the cache are un-evictable because we
  42  * have handed out a reference to them.  Blocks are only evictable
  43  * when there are no external references active.  This makes
  44  * eviction far more problematic:  we choose to evict the evictable
  45  * blocks that are the "lowest" in the list.
  46  *
  47  * There are times when it is not possible to evict the requested
  48  * space.  In these circumstances we are unable to adjust the cache
  49  * size.  To prevent the cache growing unbounded at these times we
  50  * implement a "cache throttle" that slows the flow of new data
  51  * into the cache until we can make space available.
  52  *
  53  * 2. The Megiddo and Modha model assumes a fixed cache size.
  54  * Pages are evicted when the cache is full and there is a cache
  55  * miss.  Our model has a variable sized cache.  It grows with
  56  * high use, but also tries to react to memory pressure from the
  57  * operating system: decreasing its size when system memory is
  58  * tight.
  59  *
  60  * 3. The Megiddo and Modha model assumes a fixed page size. All
  61  * elements of the cache are therefore exactly the same size.  So
  62  * when adjusting the cache size following a cache miss, it's simply
  63  * a matter of choosing a single page to evict.  In our model, we
  64  * have variable sized cache blocks (ranging from 512 bytes to
  65  * 128K bytes).  We therefore choose a set of blocks to evict to make
  66  * space for a cache miss that approximates as closely as possible
  67  * the space used by the new block.
  68  *
  69  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
  70  * by N. Megiddo & D. Modha, FAST 2003
  71  */
  72 
  73 /*
  74  * The locking model:
  75  *
  76  * A new reference to a cache buffer can be obtained in two
  77  * ways: 1) via a hash table lookup using the DVA as a key,
  78  * or 2) via one of the ARC lists.  The arc_read() interface
  79  * uses method 1, while the internal arc algorithms for
  80  * adjusting the cache use method 2.  We therefore provide two
  81  * types of locks: 1) the hash table lock array, and 2) the
  82  * arc list locks.
  83  *
  84  * Buffers do not have their own mutexes, rather they rely on the
  85  * hash table mutexes for the bulk of their protection (i.e. most
  86  * fields in the arc_buf_hdr_t are protected by these mutexes).
  87  *
  88  * buf_hash_find() returns the appropriate mutex (held) when it
  89  * locates the requested buffer in the hash table.  It returns
  90  * NULL for the mutex if the buffer was not in the table.
  91  *
  92  * buf_hash_remove() expects the appropriate hash mutex to be
  93  * already held before it is invoked.
  94  *
  95  * Each arc state also has a mutex which is used to protect the
  96  * buffer list associated with the state.  When attempting to
  97  * obtain a hash table lock while holding an arc list lock you
  98  * must use mutex_tryenter() to avoid deadlock.  Also note that
  99  * the active state mutex must be held before the ghost state mutex.
 100  *
 101  * Arc buffers may have an associated eviction callback function.
 102  * This function will be invoked prior to removing the buffer (e.g.
 103  * in arc_do_user_evicts()).  Note however that the data associated
 104  * with the buffer may be evicted prior to the callback.  The callback
 105  * must be made with *no locks held* (to prevent deadlock).  Additionally,
 106  * the users of callbacks must ensure that their private data is
 107  * protected from simultaneous callbacks from arc_buf_evict()
 108  * and arc_do_user_evicts().
 109  *
 110  * Note that the majority of the performance stats are manipulated
 111  * with atomic operations.
 112  *
 113  * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 114  *
 115  *      - L2ARC buflist creation
 116  *      - L2ARC buflist eviction
 117  *      - L2ARC write completion, which walks L2ARC buflists
 118  *      - ARC header destruction, as it removes from L2ARC buflists
 119  *      - ARC header release, as it removes from L2ARC buflists
 120  *
 121  * Please note that if you first grab the l2arc_buflist_mtx, you can't do a
 122  * mutex_enter on a buffer's hash_lock anymore due to lock inversion. To grab
 123  * the hash_lock you must use mutex_tryenter and possibly deal with the buffer
 124  * not being available (due to e.g. some other thread holding it while trying
 125  * to unconditionally grab the l2arc_buflist_mtx which you are holding). The
 126  * inverse situation (first grab hash_lock, then l2arc_buflist_mtx) is safe.
 127  */
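
     /*
      * Illustrative sketch (not code taken from this file): the
      * tryenter-and-skip pattern described above, for the case where a
      * buffer's hash_lock is needed while an arc list lock (or
      * l2arc_buflist_mtx) is already held.  The surrounding loop and the
      * variable names here are hypothetical.
      *
      *     mutex_enter(&state->arcs_mtx);
      *     for (ab = list_tail(list); ab != NULL; ab = list_prev(list, ab)) {
      *             kmutex_t *hash_lock = HDR_LOCK(ab);
      *
      *             if (!mutex_tryenter(hash_lock)) {
      *                     ARCSTAT_BUMP(arcstat_mutex_miss);
      *                     continue;      (cannot block; skip this buffer)
      *             }
      *             (... evict or otherwise adjust the buffer ...)
      *             mutex_exit(hash_lock);
      *     }
      *     mutex_exit(&state->arcs_mtx);
      */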
 128 
 129 #include <sys/spa.h>
 130 #include <sys/zio.h>
 131 #include <sys/zio_compress.h>
 132 #include <sys/zfs_context.h>
 133 #include <sys/arc.h>
 134 #include <sys/refcount.h>
 135 #include <sys/vdev.h>
 136 #include <sys/vdev_impl.h>
 137 #ifdef _KERNEL
 138 #include <sys/vmsystm.h>
 139 #include <vm/anon.h>
 140 #include <sys/fs/swapnode.h>
 141 #include <sys/dnlc.h>
 142 #endif
 143 #include <sys/callb.h>
 144 #include <sys/kstat.h>
 145 #include <zfs_fletcher.h>
 146 
 147 #ifndef _KERNEL
 148 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
 149 boolean_t arc_watch = B_FALSE;
 150 int arc_procfd;
 151 #endif
 152 
 153 static kmutex_t         arc_reclaim_thr_lock;
 154 static kcondvar_t       arc_reclaim_thr_cv;     /* used to signal reclaim thr */
 155 static uint8_t          arc_thread_exit;
 156 
 157 extern int zfs_write_limit_shift;
 158 extern uint64_t zfs_write_limit_max;
 159 extern kmutex_t zfs_write_limit_lock;
 160 
 161 #define ARC_REDUCE_DNLC_PERCENT 3
 162 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
 163 
 164 typedef enum arc_reclaim_strategy {
 165         ARC_RECLAIM_AGGR,               /* Aggressive reclaim strategy */
 166         ARC_RECLAIM_CONS                /* Conservative reclaim strategy */
 167 } arc_reclaim_strategy_t;
 168 
 169 /* number of seconds before growing cache again */
 170 static int              arc_grow_retry = 60;
 171 
 172 /* shift of arc_c for calculating both min and max arc_p */
 173 static int              arc_p_min_shift = 4;
 174 
 175 /* log2(fraction of arc to reclaim) */
 176 static int              arc_shrink_shift = 5;
 177 
 178 /*
 179  * minimum lifespan of a prefetch block in clock ticks
 180  * (initialized in arc_init())
 181  */
 182 static int              arc_min_prefetch_lifespan;
 183 
 184 static int arc_dead;
 185 
 186 /*
 187  * The arc has filled available memory and has now warmed up.
 188  */
 189 static boolean_t arc_warm;
 190 
 191 /*
 192  * These tunables are for performance analysis.
 193  */
 194 uint64_t zfs_arc_max;
 195 uint64_t zfs_arc_min;
 196 uint64_t zfs_arc_meta_limit = 0;
 197 int zfs_arc_grow_retry = 0;
 198 int zfs_arc_shrink_shift = 0;
 199 int zfs_arc_p_min_shift = 0;
 200 int zfs_disable_dup_eviction = 0;
 201 
 202 /*
 203  * Note that buffers can be in one of 6 states:
 204  *      ARC_anon        - anonymous (discussed below)
 205  *      ARC_mru         - recently used, currently cached
 206  *      ARC_mru_ghost   - recently used, no longer in cache
 207  *      ARC_mfu         - frequently used, currently cached
 208  *      ARC_mfu_ghost   - frequently used, no longer in cache
 209  *      ARC_l2c_only    - exists in L2ARC but not other states
 210  * When there are no active references to the buffer, it is
 211  * linked onto a list in one of these arc states.  These are
 212  * the only buffers that can be evicted or deleted.  Within each
 213  * state there are multiple lists, one for meta-data and one for
 214  * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 215  * etc.) is tracked separately so that it can be managed more
 216  * explicitly: favored over data, limited explicitly.
 217  *
 218  * Anonymous buffers are buffers that are not associated with
 219  * a DVA.  These are buffers that hold dirty block copies
 220  * before they are written to stable storage.  By definition,
 221  * they are "ref'd" and are considered part of arc_mru
 222  * that cannot be freed.  Generally, they will acquire a DVA
 223  * as they are written and migrate onto the arc_mru list.
 224  *
 225  * The ARC_l2c_only state is for buffers that are in the second
 226  * level ARC but no longer in any of the ARC_m* lists.  The second
 227  * level ARC itself may also contain buffers that are in any of
 228  * the ARC_m* states - meaning that a buffer can exist in two
 229  * places.  The reason for the ARC_l2c_only state is to keep the
 230  * buffer header in the hash table, so that reads that hit the
 231  * second level ARC benefit from these fast lookups.
 232  */
 233 
 234 typedef struct arc_state {
 235         list_t  arcs_list[ARC_BUFC_NUMTYPES];   /* list of evictable buffers */
 236         uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
 237         uint64_t arcs_size;     /* total amount of data in this state */
 238         kmutex_t arcs_mtx;
 239 } arc_state_t;
 240 
 241 /* The 6 states: */
 242 static arc_state_t ARC_anon;
 243 static arc_state_t ARC_mru;
 244 static arc_state_t ARC_mru_ghost;
 245 static arc_state_t ARC_mfu;
 246 static arc_state_t ARC_mfu_ghost;
 247 static arc_state_t ARC_l2c_only;
 248 
 249 typedef struct arc_stats {
 250         kstat_named_t arcstat_hits;
 251         kstat_named_t arcstat_misses;
 252         kstat_named_t arcstat_demand_data_hits;
 253         kstat_named_t arcstat_demand_data_misses;
 254         kstat_named_t arcstat_demand_metadata_hits;
 255         kstat_named_t arcstat_demand_metadata_misses;
 256         kstat_named_t arcstat_prefetch_data_hits;
 257         kstat_named_t arcstat_prefetch_data_misses;
 258         kstat_named_t arcstat_prefetch_metadata_hits;
 259         kstat_named_t arcstat_prefetch_metadata_misses;
 260         kstat_named_t arcstat_mru_hits;
 261         kstat_named_t arcstat_mru_ghost_hits;
 262         kstat_named_t arcstat_mfu_hits;
 263         kstat_named_t arcstat_mfu_ghost_hits;
 264         kstat_named_t arcstat_deleted;
 265         kstat_named_t arcstat_recycle_miss;
 266         /*
 267          * Number of buffers that could not be evicted because the hash lock
 268          * was held by another thread.  The lock may not necessarily be held
 269          * by something using the same buffer, since hash locks are shared
 270          * by multiple buffers.
 271          */
 272         kstat_named_t arcstat_mutex_miss;
 273         /*
 274          * Number of buffers skipped because they have I/O in progress, are
 275          * indirect prefetch buffers that have not lived long enough, or are
 276          * not from the spa we're trying to evict from.
 277          */
 278         kstat_named_t arcstat_evict_skip;
 279         kstat_named_t arcstat_evict_l2_cached;
 280         kstat_named_t arcstat_evict_l2_eligible;
 281         kstat_named_t arcstat_evict_l2_ineligible;
 282         kstat_named_t arcstat_hash_elements;
 283         kstat_named_t arcstat_hash_elements_max;
 284         kstat_named_t arcstat_hash_collisions;
 285         kstat_named_t arcstat_hash_chains;
 286         kstat_named_t arcstat_hash_chain_max;
 287         kstat_named_t arcstat_p;
 288         kstat_named_t arcstat_c;
 289         kstat_named_t arcstat_c_min;
 290         kstat_named_t arcstat_c_max;
 291         kstat_named_t arcstat_size;
 292         kstat_named_t arcstat_hdr_size;
 293         kstat_named_t arcstat_data_size;
 294         kstat_named_t arcstat_other_size;
 295         kstat_named_t arcstat_l2_hits;
 296         kstat_named_t arcstat_l2_misses;
 297         kstat_named_t arcstat_l2_feeds;
 298         kstat_named_t arcstat_l2_rw_clash;
 299         kstat_named_t arcstat_l2_read_bytes;
 300         kstat_named_t arcstat_l2_write_bytes;
 301         kstat_named_t arcstat_l2_writes_sent;
 302         kstat_named_t arcstat_l2_writes_done;
 303         kstat_named_t arcstat_l2_writes_error;
 304         kstat_named_t arcstat_l2_writes_hdr_miss;
 305         kstat_named_t arcstat_l2_evict_lock_retry;
 306         kstat_named_t arcstat_l2_evict_reading;
 307         kstat_named_t arcstat_l2_free_on_write;
 308         kstat_named_t arcstat_l2_abort_lowmem;
 309         kstat_named_t arcstat_l2_cksum_bad;
 310         kstat_named_t arcstat_l2_io_error;
 311         kstat_named_t arcstat_l2_size;
 312         kstat_named_t arcstat_l2_asize;
 313         kstat_named_t arcstat_l2_hdr_size;
 314         kstat_named_t arcstat_l2_compress_successes;
 315         kstat_named_t arcstat_l2_compress_zeros;
 316         kstat_named_t arcstat_l2_compress_failures;
 317         kstat_named_t arcstat_memory_throttle_count;
 318         kstat_named_t arcstat_duplicate_buffers;
 319         kstat_named_t arcstat_duplicate_buffers_size;
 320         kstat_named_t arcstat_duplicate_reads;
 321         kstat_named_t arcstat_meta_used;
 322         kstat_named_t arcstat_meta_limit;
 323         kstat_named_t arcstat_meta_max;
 324 } arc_stats_t;
 325 
 326 static arc_stats_t arc_stats = {
 327         { "hits",                       KSTAT_DATA_UINT64 },
 328         { "misses",                     KSTAT_DATA_UINT64 },
 329         { "demand_data_hits",           KSTAT_DATA_UINT64 },
 330         { "demand_data_misses",         KSTAT_DATA_UINT64 },
 331         { "demand_metadata_hits",       KSTAT_DATA_UINT64 },
 332         { "demand_metadata_misses",     KSTAT_DATA_UINT64 },
 333         { "prefetch_data_hits",         KSTAT_DATA_UINT64 },
 334         { "prefetch_data_misses",       KSTAT_DATA_UINT64 },
 335         { "prefetch_metadata_hits",     KSTAT_DATA_UINT64 },
 336         { "prefetch_metadata_misses",   KSTAT_DATA_UINT64 },
 337         { "mru_hits",                   KSTAT_DATA_UINT64 },
 338         { "mru_ghost_hits",             KSTAT_DATA_UINT64 },
 339         { "mfu_hits",                   KSTAT_DATA_UINT64 },
 340         { "mfu_ghost_hits",             KSTAT_DATA_UINT64 },
 341         { "deleted",                    KSTAT_DATA_UINT64 },
 342         { "recycle_miss",               KSTAT_DATA_UINT64 },
 343         { "mutex_miss",                 KSTAT_DATA_UINT64 },
 344         { "evict_skip",                 KSTAT_DATA_UINT64 },
 345         { "evict_l2_cached",            KSTAT_DATA_UINT64 },
 346         { "evict_l2_eligible",          KSTAT_DATA_UINT64 },
 347         { "evict_l2_ineligible",        KSTAT_DATA_UINT64 },
 348         { "hash_elements",              KSTAT_DATA_UINT64 },
 349         { "hash_elements_max",          KSTAT_DATA_UINT64 },
 350         { "hash_collisions",            KSTAT_DATA_UINT64 },
 351         { "hash_chains",                KSTAT_DATA_UINT64 },
 352         { "hash_chain_max",             KSTAT_DATA_UINT64 },
 353         { "p",                          KSTAT_DATA_UINT64 },
 354         { "c",                          KSTAT_DATA_UINT64 },
 355         { "c_min",                      KSTAT_DATA_UINT64 },
 356         { "c_max",                      KSTAT_DATA_UINT64 },
 357         { "size",                       KSTAT_DATA_UINT64 },
 358         { "hdr_size",                   KSTAT_DATA_UINT64 },
 359         { "data_size",                  KSTAT_DATA_UINT64 },
 360         { "other_size",                 KSTAT_DATA_UINT64 },
 361         { "l2_hits",                    KSTAT_DATA_UINT64 },
 362         { "l2_misses",                  KSTAT_DATA_UINT64 },
 363         { "l2_feeds",                   KSTAT_DATA_UINT64 },
 364         { "l2_rw_clash",                KSTAT_DATA_UINT64 },
 365         { "l2_read_bytes",              KSTAT_DATA_UINT64 },
 366         { "l2_write_bytes",             KSTAT_DATA_UINT64 },
 367         { "l2_writes_sent",             KSTAT_DATA_UINT64 },
 368         { "l2_writes_done",             KSTAT_DATA_UINT64 },
 369         { "l2_writes_error",            KSTAT_DATA_UINT64 },
 370         { "l2_writes_hdr_miss",         KSTAT_DATA_UINT64 },
 371         { "l2_evict_lock_retry",        KSTAT_DATA_UINT64 },
 372         { "l2_evict_reading",           KSTAT_DATA_UINT64 },
 373         { "l2_free_on_write",           KSTAT_DATA_UINT64 },
 374         { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
 375         { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
 376         { "l2_io_error",                KSTAT_DATA_UINT64 },
 377         { "l2_size",                    KSTAT_DATA_UINT64 },
 378         { "l2_asize",                   KSTAT_DATA_UINT64 },
 379         { "l2_hdr_size",                KSTAT_DATA_UINT64 },
 380         { "l2_compress_successes",      KSTAT_DATA_UINT64 },
 381         { "l2_compress_zeros",          KSTAT_DATA_UINT64 },
 382         { "l2_compress_failures",       KSTAT_DATA_UINT64 },
 383         { "memory_throttle_count",      KSTAT_DATA_UINT64 },
 384         { "duplicate_buffers",          KSTAT_DATA_UINT64 },
 385         { "duplicate_buffers_size",     KSTAT_DATA_UINT64 },
 386         { "duplicate_reads",            KSTAT_DATA_UINT64 },
 387         { "arc_meta_used",              KSTAT_DATA_UINT64 },
 388         { "arc_meta_limit",             KSTAT_DATA_UINT64 },
 389         { "arc_meta_max",               KSTAT_DATA_UINT64 }
 390 };
 391 
 392 #define ARCSTAT(stat)   (arc_stats.stat.value.ui64)
 393 
 394 #define ARCSTAT_INCR(stat, val) \
 395         atomic_add_64(&arc_stats.stat.value.ui64, (val))
 396 
 397 #define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
 398 #define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)
 399 
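     /*
      * Raise a statistic to at least "val" without taking a lock: re-read
      * the current value and retry the compare-and-swap until the stat is
      * already >= val or the swap succeeds.
      */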
 400 #define ARCSTAT_MAX(stat, val) {                                        \
 401         uint64_t m;                                                     \
 402         while ((val) > (m = arc_stats.stat.value.ui64) &&            \
 403             (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))     \
 404                 continue;                                               \
 405 }
 406 
 407 #define ARCSTAT_MAXSTAT(stat) \
 408         ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
 409 
 410 /*
 411  * We define a macro to allow ARC hits/misses to be easily broken down by
 412  * two separate conditions, giving a total of four different subtypes for
 413  * each of hits and misses (so eight statistics total).
 414  */
 415 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
 416         if (cond1) {                                                    \
 417                 if (cond2) {                                            \
 418                         ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
 419                 } else {                                                \
 420                         ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
 421                 }                                                       \
 422         } else {                                                        \
 423                 if (cond2) {                                            \
 424                         ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
 425                 } else {                                                \
 426                         ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
 427                 }                                                       \
 428         }
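
     /*
      * For example, a call of roughly this shape (illustrative only; the
      * actual call sites and predicates live elsewhere in the ARC code)
      * classifies a hit as demand vs. prefetch and data vs. metadata,
      * bumping exactly one of arcstat_demand_data_hits,
      * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
      * arcstat_prefetch_metadata_hits:
      *
      *     ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), demand, prefetch,
      *         hdr->b_type != ARC_BUFC_METADATA, data, metadata, hits);
      */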
 429 
 430 kstat_t                 *arc_ksp;
 431 static arc_state_t      *arc_anon;
 432 static arc_state_t      *arc_mru;
 433 static arc_state_t      *arc_mru_ghost;
 434 static arc_state_t      *arc_mfu;
 435 static arc_state_t      *arc_mfu_ghost;
 436 static arc_state_t      *arc_l2c_only;
 437 
 438 /*
 439  * There are several ARC variables that are critical to export as kstats --
 440  * but we don't want to have to grovel around in the kstat whenever we wish to
 441  * manipulate them.  For these variables, we therefore define them to be in
 442  * terms of the statistic variable.  This assures that we are not introducing
 443  * the possibility of inconsistency by having shadow copies of the variables,
 444  * while still allowing the code to be readable.
 445  */
 446 #define arc_size        ARCSTAT(arcstat_size)   /* actual total arc size */
 447 #define arc_p           ARCSTAT(arcstat_p)      /* target size of MRU */
 448 #define arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
 449 #define arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
 450 #define arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
 451 #define arc_meta_limit  ARCSTAT(arcstat_meta_limit) /* max size for metadata */
 452 #define arc_meta_used   ARCSTAT(arcstat_meta_used) /* size of metadata */
 453 #define arc_meta_max    ARCSTAT(arcstat_meta_max) /* max size of metadata */
 454 
 455 #define L2ARC_IS_VALID_COMPRESS(_c_) \
 456         ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
 457 
 458 static int              arc_no_grow;    /* Don't try to grow cache size */
 459 static uint64_t         arc_tempreserve;
 460 static uint64_t         arc_loaned_bytes;
 461 
 462 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
 463 
 464 typedef struct arc_callback arc_callback_t;
 465 
 466 struct arc_callback {
 467         void                    *acb_private;
 468         arc_done_func_t         *acb_done;
 469         arc_buf_t               *acb_buf;
 470         zio_t                   *acb_zio_dummy;
 471         arc_callback_t          *acb_next;
 472 };
 473 
 474 typedef struct arc_write_callback arc_write_callback_t;
 475 
 476 struct arc_write_callback {
 477         void            *awcb_private;
 478         arc_done_func_t *awcb_ready;
 479         arc_done_func_t *awcb_done;
 480         arc_buf_t       *awcb_buf;
 481 };
 482 
 483 struct arc_buf_hdr {
 484         /* protected by hash lock */
 485         dva_t                   b_dva;
 486         uint64_t                b_birth;
 487         uint64_t                b_cksum0;
 488 
 489         kmutex_t                b_freeze_lock;
 490         zio_cksum_t             *b_freeze_cksum;
 491         void                    *b_thawed;
 492 
 493         arc_buf_hdr_t           *b_hash_next;
 494         arc_buf_t               *b_buf;
 495         uint32_t                b_flags;
 496         uint32_t                b_datacnt;
 497 
 498         arc_callback_t          *b_acb;
 499         kcondvar_t              b_cv;
 500 
 501         /* immutable */
 502         arc_buf_contents_t      b_type;
 503         uint64_t                b_size;
 504         uint64_t                b_spa;
 505 
 506         /* protected by arc state mutex */
 507         arc_state_t             *b_state;
 508         list_node_t             b_arc_node;
 509 
 510         /* updated atomically */
 511         clock_t                 b_arc_access;
 512 
 513         /* self protecting */
 514         refcount_t              b_refcnt;
 515 
 516         l2arc_buf_hdr_t         *b_l2hdr;
 517         list_node_t             b_l2node;
 518 };
 519 
 520 static arc_buf_t *arc_eviction_list;
 521 static kmutex_t arc_eviction_mtx;
 522 static arc_buf_hdr_t arc_eviction_hdr;
 523 static void arc_get_data_buf(arc_buf_t *buf);
 524 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
 525 static int arc_evict_needed(arc_buf_contents_t type);
 526 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
 527 static void arc_buf_watch(arc_buf_t *buf);
 528 
 529 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
 530 
 531 #define GHOST_STATE(state)      \
 532         ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
 533         (state) == arc_l2c_only)
 534 
 535 /*
 536  * Private ARC flags.  These flags are private, ARC-only flags that will show up
 537  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 538  * be passed in as arc_flags in things like arc_read.  However, the private flags
 539  * should never be passed in and should only be set by ARC code.  When adding new
 540  * public flags, make sure not to smash the private ones.
 541  */
 542 
 543 #define ARC_IN_HASH_TABLE       (1 << 9)  /* this buffer is hashed */
 544 #define ARC_IO_IN_PROGRESS      (1 << 10) /* I/O in progress for buf */
 545 #define ARC_IO_ERROR            (1 << 11) /* I/O failed for buf */
 546 #define ARC_FREED_IN_READ       (1 << 12) /* buf freed while in read */
 547 #define ARC_BUF_AVAILABLE       (1 << 13) /* block not in active use */
 548 #define ARC_INDIRECT            (1 << 14) /* this is an indirect block */
 549 #define ARC_FREE_IN_PROGRESS    (1 << 15) /* hdr about to be freed */
 550 #define ARC_L2_WRITING          (1 << 16) /* L2ARC write in progress */
 551 #define ARC_L2_EVICTED          (1 << 17) /* evicted during I/O */
 552 #define ARC_L2_WRITE_HEAD       (1 << 18) /* head of write list */
 553 
 554 #define HDR_IN_HASH_TABLE(hdr)  ((hdr)->b_flags & ARC_IN_HASH_TABLE)
 555 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
 556 #define HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_IO_ERROR)
 557 #define HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_PREFETCH)
 558 #define HDR_FREED_IN_READ(hdr)  ((hdr)->b_flags & ARC_FREED_IN_READ)
 559 #define HDR_BUF_AVAILABLE(hdr)  ((hdr)->b_flags & ARC_BUF_AVAILABLE)
 560 #define HDR_FREE_IN_PROGRESS(hdr)       ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
 561 #define HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_L2CACHE)
 562 #define HDR_L2_READING(hdr)     ((hdr)->b_flags & ARC_IO_IN_PROGRESS &&  \
 563                                     (hdr)->b_l2hdr != NULL)
 564 #define HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_L2_WRITING)
 565 #define HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_L2_EVICTED)
 566 #define HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
 567 
 568 /*
 569  * Other sizes
 570  */
 571 
 572 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
 573 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
 574 
 575 /*
 576  * Hash table routines
 577  */
 578 
 579 #define HT_LOCK_PAD     64
 580 
 581 struct ht_lock {
 582         kmutex_t        ht_lock;
 583 #ifdef _KERNEL
 584         unsigned char   pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
 585 #endif
 586 };
 587 
 588 #define BUF_LOCKS 256
 589 typedef struct buf_hash_table {
 590         uint64_t ht_mask;
 591         arc_buf_hdr_t **ht_table;
 592         struct ht_lock ht_locks[BUF_LOCKS];
 593 } buf_hash_table_t;
 594 
 595 static buf_hash_table_t buf_hash_table;
 596 
 597 #define BUF_HASH_INDEX(spa, dva, birth) \
 598         (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
 599 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
 600 #define BUF_HASH_LOCK(idx)      (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
 601 #define HDR_LOCK(hdr) \
 602         (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
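
     /*
      * BUF_HASH_INDEX() hashes a buffer's (spa, dva, birth) identity into
      * ht_table, and the low bits of that index select one of the BUF_LOCKS
      * cache-line-padded mutexes, so each lock protects many hash chains.
      * HDR_LOCK() maps a header directly to the lock covering its chain.
      */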
 603 
 604 uint64_t zfs_crc64_table[256];
 605 
 606 /*
 607  * Level 2 ARC
 608  */
 609 
 610 #define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
 611 #define L2ARC_HEADROOM          2                       /* num of writes */
 612 /*
 613  * If we discover during ARC scan any buffers to be compressed, we boost
 614  * our headroom for the next scanning cycle by this percentage multiple.
 615  */
 616 #define L2ARC_HEADROOM_BOOST    200
 617 #define L2ARC_FEED_SECS         1               /* caching interval secs */
 618 #define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
 619 
 620 #define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
 621 #define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
 622 
 623 /* L2ARC Performance Tunables */
 624 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
 625 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
 626 uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
 627 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
 628 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
 629 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
 630 boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
 631 boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
 632 boolean_t l2arc_norw = B_FALSE;                 /* no reads during writes */
 633 
 634 /*
 635  * L2ARC Internals
 636  */
 637 typedef struct l2arc_dev {
 638         vdev_t                  *l2ad_vdev;     /* vdev */
 639         spa_t                   *l2ad_spa;      /* spa */
 640         uint64_t                l2ad_hand;      /* next write location */
 641         uint64_t                l2ad_start;     /* first addr on device */
 642         uint64_t                l2ad_end;       /* last addr on device */
 643         uint64_t                l2ad_evict;     /* last addr eviction reached */
 644         boolean_t               l2ad_first;     /* first sweep through */
 645         boolean_t               l2ad_writing;   /* currently writing */
 646         list_t                  *l2ad_buflist;  /* buffer list */
 647         list_node_t             l2ad_node;      /* device list node */
 648 } l2arc_dev_t;
 649 
 650 static list_t L2ARC_dev_list;                   /* device list */
 651 static list_t *l2arc_dev_list;                  /* device list pointer */
 652 static kmutex_t l2arc_dev_mtx;                  /* device list mutex */
 653 static l2arc_dev_t *l2arc_dev_last;             /* last device used */
 654 static kmutex_t l2arc_buflist_mtx;              /* mutex for all buflists */
 655 static list_t L2ARC_free_on_write;              /* free after write buf list */
 656 static list_t *l2arc_free_on_write;             /* free after write list ptr */
 657 static kmutex_t l2arc_free_on_write_mtx;        /* mutex for list */
 658 static uint64_t l2arc_ndev;                     /* number of devices */
 659 
 660 typedef struct l2arc_read_callback {
 661         arc_buf_t               *l2rcb_buf;             /* read buffer */
 662         spa_t                   *l2rcb_spa;             /* spa */
 663         blkptr_t                l2rcb_bp;               /* original blkptr */
 664         zbookmark_t             l2rcb_zb;               /* original bookmark */
 665         int                     l2rcb_flags;            /* original flags */
 666         enum zio_compress       l2rcb_compress;         /* applied compress */
 667 } l2arc_read_callback_t;
 668 
 669 typedef struct l2arc_write_callback {
 670         l2arc_dev_t     *l2wcb_dev;             /* device info */
 671         arc_buf_hdr_t   *l2wcb_head;            /* head of write buflist */
 672 } l2arc_write_callback_t;
 673 
 674 struct l2arc_buf_hdr {
 675         /* protected by arc_buf_hdr mutex */
 676         l2arc_dev_t             *b_dev;         /* L2ARC device */
 677         uint64_t                b_daddr;        /* disk address, offset byte */
 678         /* compression applied to buffer data */
 679         enum zio_compress       b_compress;
 680         /* real alloc'd buffer size depending on b_compress applied */
 681         int                     b_asize;
 682 };
 683 
 684 typedef struct l2arc_data_free {
 685         /* protected by l2arc_free_on_write_mtx */
 686         void            *l2df_data;
 687         size_t          l2df_size;
 688         void            (*l2df_func)(void *, size_t);
 689         list_node_t     l2df_list_node;
 690 } l2arc_data_free_t;
 691 
 692 static kmutex_t l2arc_feed_thr_lock;
 693 static kcondvar_t l2arc_feed_thr_cv;
 694 static uint8_t l2arc_thread_exit;
 695 
 696 static void l2arc_read_done(zio_t *zio);
 697 static void l2arc_hdr_stat_add(void);
 698 static void l2arc_hdr_stat_remove(void);
 699 
 700 static boolean_t l2arc_compress_buf(void *in_data, uint64_t in_sz,
 701     void **out_data, uint64_t *out_sz, enum zio_compress *compress);
 702 static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
 703     enum zio_compress c);
 704 
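     /*
      * Hash a buffer's identity by folding the DVA bytes through the CRC-64
      * table (filled in by buf_init()) and then mixing in the spa guid and
      * birth txg.
      */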
 705 static uint64_t
 706 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
 707 {
 708         uint8_t *vdva = (uint8_t *)dva;
 709         uint64_t crc = -1ULL;
 710         int i;
 711 
 712         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
 713 
 714         for (i = 0; i < sizeof (dva_t); i++)
 715                 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
 716 
 717         crc ^= (spa>>8) ^ birth;
 718 
 719         return (crc);
 720 }
 721 
 722 #define BUF_EMPTY(buf)                                          \
 723         ((buf)->b_dva.dva_word[0] == 0 &&                    \
 724         (buf)->b_dva.dva_word[1] == 0 &&                     \
 725         (buf)->b_birth == 0)
 726 
 727 #define BUF_EQUAL(spa, dva, birth, buf)                         \
 728         ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&       \
 729         ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&       \
 730         ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
 731 
 732 static void
 733 buf_discard_identity(arc_buf_hdr_t *hdr)
 734 {
 735         hdr->b_dva.dva_word[0] = 0;
 736         hdr->b_dva.dva_word[1] = 0;
 737         hdr->b_birth = 0;
 738         hdr->b_cksum0 = 0;
 739 }
 740 
 741 static arc_buf_hdr_t *
 742 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
 743 {
 744         uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 745         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 746         arc_buf_hdr_t *buf;
 747 
 748         mutex_enter(hash_lock);
 749         for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
 750             buf = buf->b_hash_next) {
 751                 if (BUF_EQUAL(spa, dva, birth, buf)) {
 752                         *lockp = hash_lock;
 753                         return (buf);
 754                 }
 755         }
 756         mutex_exit(hash_lock);
 757         *lockp = NULL;
 758         return (NULL);
 759 }
 760 
 761 /*
 762  * Insert an entry into the hash table.  If there is already an element
 763  * equal to elem in the hash table, then the already existing element
 764  * will be returned and the new element will not be inserted.
 765  * Otherwise returns NULL.
 766  */
 767 static arc_buf_hdr_t *
 768 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
 769 {
 770         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 771         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 772         arc_buf_hdr_t *fbuf;
 773         uint32_t i;
 774 
 775         ASSERT(!HDR_IN_HASH_TABLE(buf));
 776         *lockp = hash_lock;
 777         mutex_enter(hash_lock);
 778         for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
 779             fbuf = fbuf->b_hash_next, i++) {
 780                 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
 781                         return (fbuf);
 782         }
 783 
 784         buf->b_hash_next = buf_hash_table.ht_table[idx];
 785         buf_hash_table.ht_table[idx] = buf;
 786         buf->b_flags |= ARC_IN_HASH_TABLE;
 787 
 788         /* collect some hash table performance data */
 789         if (i > 0) {
 790                 ARCSTAT_BUMP(arcstat_hash_collisions);
 791                 if (i == 1)
 792                         ARCSTAT_BUMP(arcstat_hash_chains);
 793 
 794                 ARCSTAT_MAX(arcstat_hash_chain_max, i);
 795         }
 796 
 797         ARCSTAT_BUMP(arcstat_hash_elements);
 798         ARCSTAT_MAXSTAT(arcstat_hash_elements);
 799 
 800         return (NULL);
 801 }
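
     /*
      * Illustrative caller pattern (the surrounding code is hypothetical):
      * the insert either returns an existing header with the same
      * (spa, dva, birth) identity or links the new one in, and in both
      * cases *lockp points to the hash lock, which is left held for the
      * caller to drop.
      *
      *     exists = buf_hash_insert(hdr, &hash_lock);
      *     if (exists != NULL) {
      *             (someone else inserted first; use "exists", not "hdr")
      *     }
      *     ...
      *     mutex_exit(hash_lock);
      */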
 802 
 803 static void
 804 buf_hash_remove(arc_buf_hdr_t *buf)
 805 {
 806         arc_buf_hdr_t *fbuf, **bufp;
 807         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 808 
 809         ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
 810         ASSERT(HDR_IN_HASH_TABLE(buf));
 811 
 812         bufp = &buf_hash_table.ht_table[idx];
 813         while ((fbuf = *bufp) != buf) {
 814                 ASSERT(fbuf != NULL);
 815                 bufp = &fbuf->b_hash_next;
 816         }
 817         *bufp = buf->b_hash_next;
 818         buf->b_hash_next = NULL;
 819         buf->b_flags &= ~ARC_IN_HASH_TABLE;
 820 
 821         /* collect some hash table performance data */
 822         ARCSTAT_BUMPDOWN(arcstat_hash_elements);
 823 
 824         if (buf_hash_table.ht_table[idx] &&
 825             buf_hash_table.ht_table[idx]->b_hash_next == NULL)
 826                 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
 827 }
 828 
 829 /*
 830  * Global data structures and functions for the buf kmem cache.
 831  */
 832 static kmem_cache_t *hdr_cache;
 833 static kmem_cache_t *buf_cache;
 834 
 835 static void
 836 buf_fini(void)
 837 {
 838         int i;
 839 
 840         kmem_free(buf_hash_table.ht_table,
 841             (buf_hash_table.ht_mask + 1) * sizeof (void *));
 842         for (i = 0; i < BUF_LOCKS; i++)
 843                 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
 844         kmem_cache_destroy(hdr_cache);
 845         kmem_cache_destroy(buf_cache);
 846 }
 847 
 848 /*
 849  * Constructor callback - called when the cache is empty
 850  * and a new buf is requested.
 851  */
 852 /* ARGSUSED */
 853 static int
 854 hdr_cons(void *vbuf, void *unused, int kmflag)
 855 {
 856         arc_buf_hdr_t *buf = vbuf;
 857 
 858         bzero(buf, sizeof (arc_buf_hdr_t));
 859         refcount_create(&buf->b_refcnt);
 860         cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
 861         mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 862         arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 863 
 864         return (0);
 865 }
 866 
 867 /* ARGSUSED */
 868 static int
 869 buf_cons(void *vbuf, void *unused, int kmflag)
 870 {
 871         arc_buf_t *buf = vbuf;
 872 
 873         bzero(buf, sizeof (arc_buf_t));
 874         mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
 875         arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 876 
 877         return (0);
 878 }
 879 
 880 /*
 881  * Destructor callback - called when a cached buf is
 882  * no longer required.
 883  */
 884 /* ARGSUSED */
 885 static void
 886 hdr_dest(void *vbuf, void *unused)
 887 {
 888         arc_buf_hdr_t *buf = vbuf;
 889 
 890         ASSERT(BUF_EMPTY(buf));
 891         refcount_destroy(&buf->b_refcnt);
 892         cv_destroy(&buf->b_cv);
 893         mutex_destroy(&buf->b_freeze_lock);
 894         arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 895 }
 896 
 897 /* ARGSUSED */
 898 static void
 899 buf_dest(void *vbuf, void *unused)
 900 {
 901         arc_buf_t *buf = vbuf;
 902 
 903         mutex_destroy(&buf->b_evict_lock);
 904         arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 905 }
 906 
 907 /*
 908  * Reclaim callback -- invoked when memory is low.
 909  */
 910 /* ARGSUSED */
 911 static void
 912 hdr_recl(void *unused)
 913 {
 914         dprintf("hdr_recl called\n");
 915         /*
 916          * umem calls the reclaim func when we destroy the buf cache,
 917          * which is after we do arc_fini().
 918          */
 919         if (!arc_dead)
 920                 cv_signal(&arc_reclaim_thr_cv);
 921 }
 922 
 923 static void
 924 buf_init(void)
 925 {
 926         uint64_t *ct;
 927         uint64_t hsize = 1ULL << 12;
 928         int i, j;
 929 
 930         /*
 931          * The hash table is big enough to fill all of physical memory
 932          * with an average 64K block size.  The table will take up
 933  * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
 934          */
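
             /*
              * For example, with 8GB of physical memory the loop below stops
              * at 2^17 (131072) entries (131072 * 64K == 8GB), i.e. a 1MB
              * table with 8-byte pointers, matching the 128KB/GB figure
              * above.
              */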
 935         while (hsize * 65536 < physmem * PAGESIZE)
 936                 hsize <<= 1;
 937 retry:
 938         buf_hash_table.ht_mask = hsize - 1;
 939         buf_hash_table.ht_table =
 940             kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
 941         if (buf_hash_table.ht_table == NULL) {
 942                 ASSERT(hsize > (1ULL << 8));
 943                 hsize >>= 1;
 944                 goto retry;
 945         }
 946 
 947         hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
 948             0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
 949         buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
 950             0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
 951 
 952         for (i = 0; i < 256; i++)
 953                 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
 954                         *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
 955 
 956         for (i = 0; i < BUF_LOCKS; i++) {
 957                 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
 958                     NULL, MUTEX_DEFAULT, NULL);
 959         }
 960 }
 961 
 962 #define ARC_MINTIME     (hz>>4) /* 62 ms */
 963 
 964 static void
 965 arc_cksum_verify(arc_buf_t *buf)
 966 {
 967         zio_cksum_t zc;
 968 
 969         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
 970                 return;
 971 
 972         mutex_enter(&buf->b_hdr->b_freeze_lock);
 973         if (buf->b_hdr->b_freeze_cksum == NULL ||
 974             (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
 975                 mutex_exit(&buf->b_hdr->b_freeze_lock);
 976                 return;
 977         }
 978         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
 979         if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
 980                 panic("buffer modified while frozen!");
 981         mutex_exit(&buf->b_hdr->b_freeze_lock);
 982 }
 983 
 984 static int
 985 arc_cksum_equal(arc_buf_t *buf)
 986 {
 987         zio_cksum_t zc;
 988         int equal;
 989 
 990         mutex_enter(&buf->b_hdr->b_freeze_lock);
 991         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
 992         equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
 993         mutex_exit(&buf->b_hdr->b_freeze_lock);
 994 
 995         return (equal);
 996 }
 997 
 998 static void
 999 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1000 {
1001         if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1002                 return;
1003 
1004         mutex_enter(&buf->b_hdr->b_freeze_lock);
1005         if (buf->b_hdr->b_freeze_cksum != NULL) {
1006                 mutex_exit(&buf->b_hdr->b_freeze_lock);
1007                 return;
1008         }
1009         buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1010         fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1011             buf->b_hdr->b_freeze_cksum);
1012         mutex_exit(&buf->b_hdr->b_freeze_lock);
1013         arc_buf_watch(buf);
1014 }
1015 
1016 #ifndef _KERNEL
1017 typedef struct procctl {
1018         long cmd;
1019         prwatch_t prwatch;
1020 } procctl_t;
1021 #endif
1022 
1023 /* ARGSUSED */
1024 static void
1025 arc_buf_unwatch(arc_buf_t *buf)
1026 {
1027 #ifndef _KERNEL
1028         if (arc_watch) {
1029                 int result;
1030                 procctl_t ctl;
1031                 ctl.cmd = PCWATCH;
1032                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1033                 ctl.prwatch.pr_size = 0;
1034                 ctl.prwatch.pr_wflags = 0;
1035                 result = write(arc_procfd, &ctl, sizeof (ctl));
1036                 ASSERT3U(result, ==, sizeof (ctl));
1037         }
1038 #endif
1039 }
1040 
1041 /* ARGSUSED */
1042 static void
1043 arc_buf_watch(arc_buf_t *buf)
1044 {
1045 #ifndef _KERNEL
1046         if (arc_watch) {
1047                 int result;
1048                 procctl_t ctl;
1049                 ctl.cmd = PCWATCH;
1050                 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1051                 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1052                 ctl.prwatch.pr_wflags = WA_WRITE;
1053                 result = write(arc_procfd, &ctl, sizeof (ctl));
1054                 ASSERT3U(result, ==, sizeof (ctl));
1055         }
1056 #endif
1057 }
1058 
1059 void
1060 arc_buf_thaw(arc_buf_t *buf)
1061 {
1062         if (zfs_flags & ZFS_DEBUG_MODIFY) {
1063                 if (buf->b_hdr->b_state != arc_anon)
1064                         panic("modifying non-anon buffer!");
1065                 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1066                         panic("modifying buffer while i/o in progress!");
1067                 arc_cksum_verify(buf);
1068         }
1069 
1070         mutex_enter(&buf->b_hdr->b_freeze_lock);
1071         if (buf->b_hdr->b_freeze_cksum != NULL) {
1072                 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1073                 buf->b_hdr->b_freeze_cksum = NULL;
1074         }
1075 
1076         if (zfs_flags & ZFS_DEBUG_MODIFY) {
1077                 if (buf->b_hdr->b_thawed)
1078                         kmem_free(buf->b_hdr->b_thawed, 1);
1079                 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1080         }
1081 
1082         mutex_exit(&buf->b_hdr->b_freeze_lock);
1083 
1084         arc_buf_unwatch(buf);
1085 }
1086 
1087 void
1088 arc_buf_freeze(arc_buf_t *buf)
1089 {
1090         kmutex_t *hash_lock;
1091 
1092         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1093                 return;
1094 
1095         hash_lock = HDR_LOCK(buf->b_hdr);
1096         mutex_enter(hash_lock);
1097 
1098         ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1099             buf->b_hdr->b_state == arc_anon);
1100         arc_cksum_compute(buf, B_FALSE);
1101         mutex_exit(hash_lock);
1102 
1103 }
1104 
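     /*
      * Taking the first reference on a buffer in a non-anonymous state pulls
      * it off that state's evictable list (and out of arcs_lsize), making it
      * un-evictable; remove_reference() below does the reverse when the last
      * reference is dropped.
      */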
1105 static void
1106 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1107 {
1108         ASSERT(MUTEX_HELD(hash_lock));
1109 
1110         if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1111             (ab->b_state != arc_anon)) {
1112                 uint64_t delta = ab->b_size * ab->b_datacnt;
1113                 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1114                 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1115 
1116                 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1117                 mutex_enter(&ab->b_state->arcs_mtx);
1118                 ASSERT(list_link_active(&ab->b_arc_node));
1119                 list_remove(list, ab);
1120                 if (GHOST_STATE(ab->b_state)) {
1121                         ASSERT0(ab->b_datacnt);
1122                         ASSERT3P(ab->b_buf, ==, NULL);
1123                         delta = ab->b_size;
1124                 }
1125                 ASSERT(delta > 0);
1126                 ASSERT3U(*size, >=, delta);
1127                 atomic_add_64(size, -delta);
1128                 mutex_exit(&ab->b_state->arcs_mtx);
1129                 /* remove the prefetch flag if we get a reference */
1130                 if (ab->b_flags & ARC_PREFETCH)
1131                         ab->b_flags &= ~ARC_PREFETCH;
1132         }
1133 }
1134 
1135 static int
1136 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1137 {
1138         int cnt;
1139         arc_state_t *state = ab->b_state;
1140 
1141         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1142         ASSERT(!GHOST_STATE(state));
1143 
1144         if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1145             (state != arc_anon)) {
1146                 uint64_t *size = &state->arcs_lsize[ab->b_type];
1147 
1148                 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1149                 mutex_enter(&state->arcs_mtx);
1150                 ASSERT(!list_link_active(&ab->b_arc_node));
1151                 list_insert_head(&state->arcs_list[ab->b_type], ab);
1152                 ASSERT(ab->b_datacnt > 0);
1153                 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1154                 mutex_exit(&state->arcs_mtx);
1155         }
1156         return (cnt);
1157 }
1158 
1159 /*
1160  * Move the supplied buffer to the indicated state.  The mutex
1161  * for the buffer must be held by the caller.
1162  */
1163 static void
1164 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1165 {
1166         arc_state_t *old_state = ab->b_state;
1167         int64_t refcnt = refcount_count(&ab->b_refcnt);
1168         uint64_t from_delta, to_delta;
1169 
1170         ASSERT(MUTEX_HELD(hash_lock));
1171         ASSERT(new_state != old_state);
1172         ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1173         ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1174         ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1175 
1176         from_delta = to_delta = ab->b_datacnt * ab->b_size;
1177 
1178         /*
1179          * If this buffer is evictable, transfer it from the
1180          * old state list to the new state list.
1181          */
1182         if (refcnt == 0) {
1183                 if (old_state != arc_anon) {
1184                         int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1185                         uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1186 
1187                         if (use_mutex)
1188                                 mutex_enter(&old_state->arcs_mtx);
1189 
1190                         ASSERT(list_link_active(&ab->b_arc_node));
1191                         list_remove(&old_state->arcs_list[ab->b_type], ab);
1192 
1193                         /*
1194                          * If prefetching out of the ghost cache,
1195                          * we will have a non-zero datacnt.
1196                          */
1197                         if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1198                                 /* ghost elements have a ghost size */
1199                                 ASSERT(ab->b_buf == NULL);
1200                                 from_delta = ab->b_size;
1201                         }
1202                         ASSERT3U(*size, >=, from_delta);
1203                         atomic_add_64(size, -from_delta);
1204 
1205                         if (use_mutex)
1206                                 mutex_exit(&old_state->arcs_mtx);
1207                 }
1208                 if (new_state != arc_anon) {
1209                         int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1210                         uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1211 
1212                         if (use_mutex)
1213                                 mutex_enter(&new_state->arcs_mtx);
1214 
1215                         list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1216 
1217                         /* ghost elements have a ghost size */
1218                         if (GHOST_STATE(new_state)) {
1219                                 ASSERT(ab->b_datacnt == 0);
1220                                 ASSERT(ab->b_buf == NULL);
1221                                 to_delta = ab->b_size;
1222                         }
1223                         atomic_add_64(size, to_delta);
1224 
1225                         if (use_mutex)
1226                                 mutex_exit(&new_state->arcs_mtx);
1227                 }
1228         }
1229 
1230         ASSERT(!BUF_EMPTY(ab));
1231         if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1232                 buf_hash_remove(ab);
1233 
1234         /* adjust state sizes */
1235         if (to_delta)
1236                 atomic_add_64(&new_state->arcs_size, to_delta);
1237         if (from_delta) {
1238                 ASSERT3U(old_state->arcs_size, >=, from_delta);
1239                 atomic_add_64(&old_state->arcs_size, -from_delta);
1240         }
1241         ab->b_state = new_state;
1242 
1243         /* adjust l2arc hdr stats */
1244         if (new_state == arc_l2c_only)
1245                 l2arc_hdr_stat_add();
1246         else if (old_state == arc_l2c_only)
1247                 l2arc_hdr_stat_remove();
1248 }
1249 
1250 void
1251 arc_space_consume(uint64_t space, arc_space_type_t type)
1252 {
1253         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1254 
1255         switch (type) {
1256         case ARC_SPACE_DATA:
1257                 ARCSTAT_INCR(arcstat_data_size, space);
1258                 break;
1259         case ARC_SPACE_OTHER:
1260                 ARCSTAT_INCR(arcstat_other_size, space);
1261                 break;
1262         case ARC_SPACE_HDRS:
1263                 ARCSTAT_INCR(arcstat_hdr_size, space);
1264                 break;
1265         case ARC_SPACE_L2HDRS:
1266                 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1267                 break;
1268         }
1269 
1270         ARCSTAT_INCR(arcstat_meta_used, space);
1271         atomic_add_64(&arc_size, space);
1272 }
1273 
1274 void
1275 arc_space_return(uint64_t space, arc_space_type_t type)
1276 {
1277         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1278 
1279         switch (type) {
1280         case ARC_SPACE_DATA:
1281                 ARCSTAT_INCR(arcstat_data_size, -space);
1282                 break;
1283         case ARC_SPACE_OTHER:
1284                 ARCSTAT_INCR(arcstat_other_size, -space);
1285                 break;
1286         case ARC_SPACE_HDRS:
1287                 ARCSTAT_INCR(arcstat_hdr_size, -space);
1288                 break;
1289         case ARC_SPACE_L2HDRS:
1290                 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1291                 break;
1292         }
1293 
1294         ASSERT(arc_meta_used >= space);
1295         if (arc_meta_max < arc_meta_used)
1296                 arc_meta_max = arc_meta_used;
1297         ARCSTAT_INCR(arcstat_meta_used, -space);
1298         ASSERT(arc_size >= space);
1299         atomic_add_64(&arc_size, -space);
1300 }
1301 
1302 void *
1303 arc_data_buf_alloc(uint64_t size)
1304 {
1305         if (arc_evict_needed(ARC_BUFC_DATA))
1306                 cv_signal(&arc_reclaim_thr_cv);
1307         atomic_add_64(&arc_size, size);
1308         return (zio_data_buf_alloc(size));
1309 }
1310 
1311 void
1312 arc_data_buf_free(void *buf, uint64_t size)
1313 {
1314         zio_data_buf_free(buf, size);
1315         ASSERT(arc_size >= size);
1316         atomic_add_64(&arc_size, -size);
1317 }
1318 
1319 arc_buf_t *
1320 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1321 {
1322         arc_buf_hdr_t *hdr;
1323         arc_buf_t *buf;
1324 
1325         ASSERT3U(size, >, 0);
1326         hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1327         ASSERT(BUF_EMPTY(hdr));
1328         hdr->b_size = size;
1329         hdr->b_type = type;
1330         hdr->b_spa = spa_load_guid(spa);
1331         hdr->b_state = arc_anon;
1332         hdr->b_arc_access = 0;
1333         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1334         buf->b_hdr = hdr;
1335         buf->b_data = NULL;
1336         buf->b_efunc = NULL;
1337         buf->b_private = NULL;
1338         buf->b_next = NULL;
1339         hdr->b_buf = buf;
1340         arc_get_data_buf(buf);
1341         hdr->b_datacnt = 1;
1342         hdr->b_flags = 0;
1343         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1344         (void) refcount_add(&hdr->b_refcnt, tag);
1345 
1346         return (buf);
1347 }
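     /*
      * Illustrative lifecycle of the buffer returned above: the tag passed
      * to arc_buf_alloc() holds a reference on the new anonymous header,
      * and the caller is expected to drop that reference later via
      * arc_buf_remove_ref() or arc_buf_free() with the same tag.
      */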
1348 
1349 static char *arc_onloan_tag = "onloan";
1350 
1351 /*
1352  * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1353  * flight data by arc_tempreserve_space() until they are "returned". Loaned
1354  * buffers must be returned to the arc before they can be used by the DMU or
1355  * freed.
1356  */
1357 arc_buf_t *
1358 arc_loan_buf(spa_t *spa, int size)
1359 {
1360         arc_buf_t *buf;
1361 
1362         buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1363 
1364         atomic_add_64(&arc_loaned_bytes, size);
1365         return (buf);
1366 }
1367 
1368 /*
1369  * Return a loaned arc buffer to the arc.
1370  */
1371 void
1372 arc_return_buf(arc_buf_t *buf, void *tag)
1373 {
1374         arc_buf_hdr_t *hdr = buf->b_hdr;
1375 
1376         ASSERT(buf->b_data != NULL);
1377         (void) refcount_add(&hdr->b_refcnt, tag);
1378         (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1379 
1380         atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1381 }
1382 
1383 /* Detach an arc_buf from a dbuf (tag) */
1384 void
1385 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1386 {
1387         arc_buf_hdr_t *hdr;
1388 
1389         ASSERT(buf->b_data != NULL);
1390         hdr = buf->b_hdr;
1391         (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1392         (void) refcount_remove(&hdr->b_refcnt, tag);
1393         buf->b_efunc = NULL;
1394         buf->b_private = NULL;
1395 
1396         atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1397 }
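     /*
      * Sketch of the loan protocol implied by the three routines above:
      * arc_loan_buf() hands out an anonymous buffer tagged with
      * arc_onloan_tag, arc_return_buf() transfers that reference back to
      * the caller's tag, and arc_loan_inuse_buf() does the reverse,
      * detaching an in-use buffer from its dbuf tag.  arc_loaned_bytes
      * tracks how much data is out on loan at any given time.
      */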
1398 
1399 static arc_buf_t *
1400 arc_buf_clone(arc_buf_t *from)
1401 {
1402         arc_buf_t *buf;
1403         arc_buf_hdr_t *hdr = from->b_hdr;
1404         uint64_t size = hdr->b_size;
1405 
1406         ASSERT(hdr->b_state != arc_anon);
1407 
1408         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1409         buf->b_hdr = hdr;
1410         buf->b_data = NULL;
1411         buf->b_efunc = NULL;
1412         buf->b_private = NULL;
1413         buf->b_next = hdr->b_buf;
1414         hdr->b_buf = buf;
1415         arc_get_data_buf(buf);
1416         bcopy(from->b_data, buf->b_data, size);
1417 
1418         /*
1419          * This buffer already exists in the arc, so create a duplicate
1420          * copy for the caller.  If the buffer is associated with user data
1421          * then track the size and number of duplicates.  These stats will be
1422          * updated as duplicate buffers are created and destroyed.
1423          */
1424         if (hdr->b_type == ARC_BUFC_DATA) {
1425                 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1426                 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1427         }
1428         hdr->b_datacnt += 1;
1429         return (buf);
1430 }
1431 
1432 void
1433 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1434 {
1435         arc_buf_hdr_t *hdr;
1436         kmutex_t *hash_lock;
1437 
1438         /*
1439          * Check to see if this buffer is evicted.  Callers
1440          * must verify b_data != NULL to know if the add_ref
1441          * was successful.
1442          */
1443         mutex_enter(&buf->b_evict_lock);
1444         if (buf->b_data == NULL) {
1445                 mutex_exit(&buf->b_evict_lock);
1446                 return;
1447         }
1448         hash_lock = HDR_LOCK(buf->b_hdr);
1449         mutex_enter(hash_lock);
1450         hdr = buf->b_hdr;
1451         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1452         mutex_exit(&buf->b_evict_lock);
1453 
1454         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1455         add_reference(hdr, hash_lock, tag);
1456         DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1457         arc_access(hdr, hash_lock);
1458         mutex_exit(hash_lock);
1459         ARCSTAT_BUMP(arcstat_hits);
1460         ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1461             demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1462             data, metadata, hits);
1463 }
1464 
1465 /*
1466  * Free the arc data buffer.  If an l2arc write of this buffer is in
1467  * progress, the data is placed on l2arc_free_on_write to be freed later.
1468  */
1469 static void
1470 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1471 {
1472         arc_buf_hdr_t *hdr = buf->b_hdr;
1473 
1474         if (HDR_L2_WRITING(hdr)) {
1475                 l2arc_data_free_t *df;
1476                 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1477                 df->l2df_data = buf->b_data;
1478                 df->l2df_size = hdr->b_size;
1479                 df->l2df_func = free_func;
1480                 mutex_enter(&l2arc_free_on_write_mtx);
1481                 list_insert_head(l2arc_free_on_write, df);
1482                 mutex_exit(&l2arc_free_on_write_mtx);
1483                 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1484         } else {
1485                 free_func(buf->b_data, hdr->b_size);
1486         }
1487 }
1488 
1489 static void
1490 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1491 {
1492         arc_buf_t **bufp;
1493 
1494         /* free up data associated with the buf */
1495         if (buf->b_data) {
1496                 arc_state_t *state = buf->b_hdr->b_state;
1497                 uint64_t size = buf->b_hdr->b_size;
1498                 arc_buf_contents_t type = buf->b_hdr->b_type;
1499 
1500                 arc_cksum_verify(buf);
1501                 arc_buf_unwatch(buf);
1502 
1503                 if (!recycle) {
1504                         if (type == ARC_BUFC_METADATA) {
1505                                 arc_buf_data_free(buf, zio_buf_free);
1506                                 arc_space_return(size, ARC_SPACE_DATA);
1507                         } else {
1508                                 ASSERT(type == ARC_BUFC_DATA);
1509                                 arc_buf_data_free(buf, zio_data_buf_free);
1510                                 ARCSTAT_INCR(arcstat_data_size, -size);
1511                                 atomic_add_64(&arc_size, -size);
1512                         }
1513                 }
1514                 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1515                         uint64_t *cnt = &state->arcs_lsize[type];
1516 
1517                         ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1518                         ASSERT(state != arc_anon);
1519 
1520                         ASSERT3U(*cnt, >=, size);
1521                         atomic_add_64(cnt, -size);
1522                 }
1523                 ASSERT3U(state->arcs_size, >=, size);
1524                 atomic_add_64(&state->arcs_size, -size);
1525                 buf->b_data = NULL;
1526 
1527                 /*
1528                  * If we're destroying a duplicate buffer make sure
1529                  * that the appropriate statistics are updated.
1530                  */
1531                 if (buf->b_hdr->b_datacnt > 1 &&
1532                     buf->b_hdr->b_type == ARC_BUFC_DATA) {
1533                         ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1534                         ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1535                 }
1536                 ASSERT(buf->b_hdr->b_datacnt > 0);
1537                 buf->b_hdr->b_datacnt -= 1;
1538         }
1539 
1540         /* only remove the buf if requested */
1541         if (!all)
1542                 return;
1543 
1544         /* remove the buf from the hdr list */
1545         for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1546                 continue;
1547         *bufp = buf->b_next;
1548         buf->b_next = NULL;
1549 
1550         ASSERT(buf->b_efunc == NULL);
1551 
1552         /* clean up the buf */
1553         buf->b_hdr = NULL;
1554         kmem_cache_free(buf_cache, buf);
1555 }
1556 
1557 static void
1558 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1559 {
1560         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1561         ASSERT3P(hdr->b_state, ==, arc_anon);
1562         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1563         l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1564 
1565         if (l2hdr != NULL) {
1566                 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1567                 /*
1568                  * To prevent arc_free() and l2arc_evict() from
1569                  * attempting to free the same buffer at the same time,
1570                  * a FREE_IN_PROGRESS flag is given to arc_free() to
1571                  * give it priority.  l2arc_evict() can't destroy this
1572                  * header while we are waiting on l2arc_buflist_mtx.
1573                  *
1574                  * The hdr may be removed from l2ad_buflist before we
1575                  * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1576                  */
1577                 if (!buflist_held) {
1578                         mutex_enter(&l2arc_buflist_mtx);
1579                         l2hdr = hdr->b_l2hdr;
1580                 }
1581 
1582                 if (l2hdr != NULL) {
1583                         list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1584                         ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1585                         ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1586                         kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1587                         if (hdr->b_state == arc_l2c_only)
1588                                 l2arc_hdr_stat_remove();
1589                         hdr->b_l2hdr = NULL;
1590                 }
1591 
1592                 if (!buflist_held)
1593                         mutex_exit(&l2arc_buflist_mtx);
1594         }
1595 
1596         if (!BUF_EMPTY(hdr)) {
1597                 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1598                 buf_discard_identity(hdr);
1599         }
1600         while (hdr->b_buf) {
1601                 arc_buf_t *buf = hdr->b_buf;
1602 
1603                 if (buf->b_efunc) {
1604                         mutex_enter(&arc_eviction_mtx);
1605                         mutex_enter(&buf->b_evict_lock);
1606                         ASSERT(buf->b_hdr != NULL);
1607                         arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1608                         hdr->b_buf = buf->b_next;
1609                         buf->b_hdr = &arc_eviction_hdr;
1610                         buf->b_next = arc_eviction_list;
1611                         arc_eviction_list = buf;
1612                         mutex_exit(&buf->b_evict_lock);
1613                         mutex_exit(&arc_eviction_mtx);
1614                 } else {
1615                         arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1616                 }
1617         }
1618         if (hdr->b_freeze_cksum != NULL) {
1619                 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1620                 hdr->b_freeze_cksum = NULL;
1621         }
1622         if (hdr->b_thawed) {
1623                 kmem_free(hdr->b_thawed, 1);
1624                 hdr->b_thawed = NULL;
1625         }
1626 
1627         ASSERT(!list_link_active(&hdr->b_arc_node));
1628         ASSERT3P(hdr->b_hash_next, ==, NULL);
1629         ASSERT3P(hdr->b_acb, ==, NULL);
1630         kmem_cache_free(hdr_cache, hdr);
1631 }
1632 
1633 void
1634 arc_buf_free(arc_buf_t *buf, void *tag)
1635 {
1636         arc_buf_hdr_t *hdr = buf->b_hdr;
1637         int hashed = hdr->b_state != arc_anon;
1638 
1639         ASSERT(buf->b_efunc == NULL);
1640         ASSERT(buf->b_data != NULL);
1641 
1642         if (hashed) {
1643                 kmutex_t *hash_lock = HDR_LOCK(hdr);
1644 
1645                 mutex_enter(hash_lock);
1646                 hdr = buf->b_hdr;
1647                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1648 
1649                 (void) remove_reference(hdr, hash_lock, tag);
1650                 if (hdr->b_datacnt > 1) {
1651                         arc_buf_destroy(buf, FALSE, TRUE);
1652                 } else {
1653                         ASSERT(buf == hdr->b_buf);
1654                         ASSERT(buf->b_efunc == NULL);
1655                         hdr->b_flags |= ARC_BUF_AVAILABLE;
1656                 }
1657                 mutex_exit(hash_lock);
1658         } else if (HDR_IO_IN_PROGRESS(hdr)) {
1659                 int destroy_hdr;
1660                 /*
1661                  * We are in the middle of an async write.  Don't destroy
1662                  * this buffer unless the write completes before we finish
1663                  * decrementing the reference count.
1664                  */
1665                 mutex_enter(&arc_eviction_mtx);
1666                 (void) remove_reference(hdr, NULL, tag);
1667                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1668                 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1669                 mutex_exit(&arc_eviction_mtx);
1670                 if (destroy_hdr)
1671                         arc_hdr_destroy(hdr);
1672         } else {
1673                 if (remove_reference(hdr, NULL, tag) > 0)
1674                         arc_buf_destroy(buf, FALSE, TRUE);
1675                 else
1676                         arc_hdr_destroy(hdr);
1677         }
1678 }
1679 
1680 boolean_t
1681 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1682 {
1683         arc_buf_hdr_t *hdr = buf->b_hdr;
1684         kmutex_t *hash_lock = HDR_LOCK(hdr);
1685         boolean_t no_callback = (buf->b_efunc == NULL);
1686 
1687         if (hdr->b_state == arc_anon) {
1688                 ASSERT(hdr->b_datacnt == 1);
1689                 arc_buf_free(buf, tag);
1690                 return (no_callback);
1691         }
1692 
1693         mutex_enter(hash_lock);
1694         hdr = buf->b_hdr;
1695         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1696         ASSERT(hdr->b_state != arc_anon);
1697         ASSERT(buf->b_data != NULL);
1698 
1699         (void) remove_reference(hdr, hash_lock, tag);
1700         if (hdr->b_datacnt > 1) {
1701                 if (no_callback)
1702                         arc_buf_destroy(buf, FALSE, TRUE);
1703         } else if (no_callback) {
1704                 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1705                 ASSERT(buf->b_efunc == NULL);
1706                 hdr->b_flags |= ARC_BUF_AVAILABLE;
1707         }
1708         ASSERT(no_callback || hdr->b_datacnt > 1 ||
1709             refcount_is_zero(&hdr->b_refcnt));
1710         mutex_exit(hash_lock);
1711         return (no_callback);
1712 }
1713 
1714 int
1715 arc_buf_size(arc_buf_t *buf)
1716 {
1717         return (buf->b_hdr->b_size);
1718 }
1719 
1720 /*
1721  * Called from the DMU to determine if the current buffer should be
1722  * evicted. In order to ensure proper locking, the eviction must be initiated
1723  * from the DMU. Return true if the buffer is associated with user data and
1724  * duplicate buffers still exist.
1725  */
1726 boolean_t
1727 arc_buf_eviction_needed(arc_buf_t *buf)
1728 {
1729         arc_buf_hdr_t *hdr;
1730         boolean_t evict_needed = B_FALSE;
1731 
1732         if (zfs_disable_dup_eviction)
1733                 return (B_FALSE);
1734 
1735         mutex_enter(&buf->b_evict_lock);
1736         hdr = buf->b_hdr;
1737         if (hdr == NULL) {
1738                 /*
1739                  * We are in arc_do_user_evicts(); let that function
1740                  * perform the eviction.
1741                  */
1742                 ASSERT(buf->b_data == NULL);
1743                 mutex_exit(&buf->b_evict_lock);
1744                 return (B_FALSE);
1745         } else if (buf->b_data == NULL) {
1746                 /*
1747                  * We have already been added to the arc eviction list;
1748                  * recommend eviction.
1749                  */
1750                 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1751                 mutex_exit(&buf->b_evict_lock);
1752                 return (B_TRUE);
1753         }
1754 
1755         if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1756                 evict_needed = B_TRUE;
1757 
1758         mutex_exit(&buf->b_evict_lock);
1759         return (evict_needed);
1760 }
1761 
1762 /*
1763  * Evict buffers from list until we've removed the specified number of
1764  * bytes.  Move the removed buffers to the appropriate evict state.
1765  * If the recycle flag is set, then attempt to "recycle" a buffer:
1766  * - look for a buffer to evict that is `bytes' long.
1767  * - return the data block from this buffer rather than freeing it.
1768  * This flag is used by callers that are trying to make space for a
1769  * new buffer in a full arc cache.
1770  *
1771  * This function makes a "best effort".  It skips over any buffers
1772  * it can't get a hash_lock on, and so may not catch all candidates.
1773  * It may also return without evicting as much space as requested.
1774  */
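     /*
      * For example, arc_get_data_buf() calls this with recycle == TRUE when
      * the cache is full, hoping to be handed back ("steal") a data block of
      * exactly `bytes' so it can be reused without a free/alloc cycle.
      */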
1775 static void *
1776 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1777     arc_buf_contents_t type)
1778 {
1779         arc_state_t *evicted_state;
1780         uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1781         arc_buf_hdr_t *ab, *ab_prev = NULL;
1782         list_t *list = &state->arcs_list[type];
1783         kmutex_t *hash_lock;
1784         boolean_t have_lock;
1785         void *stolen = NULL;
1786 
1787         ASSERT(state == arc_mru || state == arc_mfu);
1788 
1789         evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1790 
1791         mutex_enter(&state->arcs_mtx);
1792         mutex_enter(&evicted_state->arcs_mtx);
1793 
1794         for (ab = list_tail(list); ab; ab = ab_prev) {
1795                 ab_prev = list_prev(list, ab);
1796                 /* prefetch buffers have a minimum lifespan */
1797                 if (HDR_IO_IN_PROGRESS(ab) ||
1798                     (spa && ab->b_spa != spa) ||
1799                     (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1800                     ddi_get_lbolt() - ab->b_arc_access <
1801                     arc_min_prefetch_lifespan)) {
1802                         skipped++;
1803                         continue;
1804                 }
1805                 /* "lookahead" for better eviction candidate */
1806                 if (recycle && ab->b_size != bytes &&
1807                     ab_prev && ab_prev->b_size == bytes)
1808                         continue;
1809                 hash_lock = HDR_LOCK(ab);
1810                 have_lock = MUTEX_HELD(hash_lock);
1811                 if (have_lock || mutex_tryenter(hash_lock)) {
1812                         ASSERT0(refcount_count(&ab->b_refcnt));
1813                         ASSERT(ab->b_datacnt > 0);
1814                         while (ab->b_buf) {
1815                                 arc_buf_t *buf = ab->b_buf;
1816                                 if (!mutex_tryenter(&buf->b_evict_lock)) {
1817                                         missed += 1;
1818                                         break;
1819                                 }
1820                                 if (buf->b_data) {
1821                                         bytes_evicted += ab->b_size;
1822                                         if (recycle && ab->b_type == type &&
1823                                             ab->b_size == bytes &&
1824                                             !HDR_L2_WRITING(ab)) {
1825                                                 stolen = buf->b_data;
1826                                                 recycle = FALSE;
1827                                         }
1828                                 }
1829                                 if (buf->b_efunc) {
1830                                         mutex_enter(&arc_eviction_mtx);
1831                                         arc_buf_destroy(buf,
1832                                             buf->b_data == stolen, FALSE);
1833                                         ab->b_buf = buf->b_next;
1834                                         buf->b_hdr = &arc_eviction_hdr;
1835                                         buf->b_next = arc_eviction_list;
1836                                         arc_eviction_list = buf;
1837                                         mutex_exit(&arc_eviction_mtx);
1838                                         mutex_exit(&buf->b_evict_lock);
1839                                 } else {
1840                                         mutex_exit(&buf->b_evict_lock);
1841                                         arc_buf_destroy(buf,
1842                                             buf->b_data == stolen, TRUE);
1843                                 }
1844                         }
1845 
1846                         if (ab->b_l2hdr) {
1847                                 ARCSTAT_INCR(arcstat_evict_l2_cached,
1848                                     ab->b_size);
1849                         } else {
1850                                 if (l2arc_write_eligible(ab->b_spa, ab)) {
1851                                         ARCSTAT_INCR(arcstat_evict_l2_eligible,
1852                                             ab->b_size);
1853                                 } else {
1854                                         ARCSTAT_INCR(
1855                                             arcstat_evict_l2_ineligible,
1856                                             ab->b_size);
1857                                 }
1858                         }
1859 
1860                         if (ab->b_datacnt == 0) {
1861                                 arc_change_state(evicted_state, ab, hash_lock);
1862                                 ASSERT(HDR_IN_HASH_TABLE(ab));
1863                                 ab->b_flags |= ARC_IN_HASH_TABLE;
1864                                 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1865                                 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1866                         }
1867                         if (!have_lock)
1868                                 mutex_exit(hash_lock);
1869                         if (bytes >= 0 && bytes_evicted >= bytes)
1870                                 break;
1871                 } else {
1872                         missed += 1;
1873                 }
1874         }
1875 
1876         mutex_exit(&evicted_state->arcs_mtx);
1877         mutex_exit(&state->arcs_mtx);
1878 
1879         if (bytes_evicted < bytes)
1880                 dprintf("only evicted %lld bytes from %p",
1881                     (longlong_t)bytes_evicted, state);
1882 
1883         if (skipped)
1884                 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1885 
1886         if (missed)
1887                 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1888 
1889         /*
1890          * We have just evicted some data into the ghost state; make
1891          * sure we also adjust the ghost state size if necessary.
1892          */
1893         if (arc_no_grow &&
1894             arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
1895                 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
1896                     arc_mru_ghost->arcs_size - arc_c;
1897 
1898                 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
1899                         int64_t todelete =
1900                             MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
1901                         arc_evict_ghost(arc_mru_ghost, NULL, todelete);
1902                 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
1903                         int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
1904                             arc_mru_ghost->arcs_size +
1905                             arc_mfu_ghost->arcs_size - arc_c);
1906                         arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
1907                 }
1908         }
1909 
1910         return (stolen);
1911 }
1912 
1913 /*
1914  * Remove buffers from list until we've removed the specified number of
1915  * bytes.  Destroy the buffers that are removed.
1916  */
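     /*
      * Ghost headers carry no data (ab->b_buf is NULL below), so "removing
      * bytes" here only trims the ghost-list accounting; arc_adjust() uses
      * this to keep the ghost lists sized near arc_c, and arc_flush() uses
      * it (with bytes == -1) to drain them entirely.
      */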
1917 static void
1918 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1919 {
1920         arc_buf_hdr_t *ab, *ab_prev;
1921         arc_buf_hdr_t marker = { 0 };
1922         list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1923         kmutex_t *hash_lock;
1924         uint64_t bytes_deleted = 0;
1925         uint64_t bufs_skipped = 0;
1926 
1927         ASSERT(GHOST_STATE(state));
1928 top:
1929         mutex_enter(&state->arcs_mtx);
1930         for (ab = list_tail(list); ab; ab = ab_prev) {
1931                 ab_prev = list_prev(list, ab);
1932                 if (spa && ab->b_spa != spa)
1933                         continue;
1934 
1935                 /* ignore markers */
1936                 if (ab->b_spa == 0)
1937                         continue;
1938 
1939                 hash_lock = HDR_LOCK(ab);
1940                 /* caller may be trying to modify this buffer, skip it */
1941                 if (MUTEX_HELD(hash_lock))
1942                         continue;
1943                 if (mutex_tryenter(hash_lock)) {
1944                         ASSERT(!HDR_IO_IN_PROGRESS(ab));
1945                         ASSERT(ab->b_buf == NULL);
1946                         ARCSTAT_BUMP(arcstat_deleted);
1947                         bytes_deleted += ab->b_size;
1948 
1949                         if (ab->b_l2hdr != NULL) {
1950                                 /*
1951                                  * This buffer is cached on the 2nd Level ARC;
1952                                  * don't destroy the header.
1953                                  */
1954                                 arc_change_state(arc_l2c_only, ab, hash_lock);
1955                                 mutex_exit(hash_lock);
1956                         } else {
1957                                 arc_change_state(arc_anon, ab, hash_lock);
1958                                 mutex_exit(hash_lock);
1959                                 arc_hdr_destroy(ab);
1960                         }
1961 
1962                         DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1963                         if (bytes >= 0 && bytes_deleted >= bytes)
1964                                 break;
1965                 } else if (bytes < 0) {
1966                         /*
1967                          * Insert a list marker and then wait for the
1968                          * hash lock to become available.  Once it's
1969                          * available, restart from where we left off.
1970                          */
1971                         list_insert_after(list, ab, &marker);
1972                         mutex_exit(&state->arcs_mtx);
1973                         mutex_enter(hash_lock);
1974                         mutex_exit(hash_lock);
1975                         mutex_enter(&state->arcs_mtx);
1976                         ab_prev = list_prev(list, &marker);
1977                         list_remove(list, &marker);
1978                 } else
1979                         bufs_skipped += 1;
1980         }
1981         mutex_exit(&state->arcs_mtx);
1982 
1983         if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1984             (bytes < 0 || bytes_deleted < bytes)) {
1985                 list = &state->arcs_list[ARC_BUFC_METADATA];
1986                 goto top;
1987         }
1988 
1989         if (bufs_skipped) {
1990                 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1991                 ASSERT(bytes >= 0);
1992         }
1993 
1994         if (bytes_deleted < bytes)
1995                 dprintf("only deleted %lld bytes from %p",
1996                     (longlong_t)bytes_deleted, state);
1997 }
1998 
1999 static void
2000 arc_adjust(void)
2001 {
2002         int64_t adjustment, delta;
2003 
2004         /*
2005          * Adjust MRU size
2006          */
2007 
2008         adjustment = MIN((int64_t)(arc_size - arc_c),
2009             (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2010             arc_p));
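             /*
              * Worked example (illustrative numbers): with arc_size = 10GB,
              * arc_c = 8GB, anon + mru + meta_used = 5GB and arc_p = 4GB,
              * the cache is 2GB over target but the MRU side is only 1GB
              * over arc_p, so at most 1GB is evicted here; the rest is
              * picked up by the MFU adjustment below, which re-reads
              * arc_size - arc_c after this eviction.
              */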
2011 
2012         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2013                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2014                 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
2015                 adjustment -= delta;
2016         }
2017 
2018         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2019                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2020                 (void) arc_evict(arc_mru, NULL, delta, FALSE,
2021                     ARC_BUFC_METADATA);
2022         }
2023 
2024         /*
2025          * Adjust MFU size
2026          */
2027 
2028         adjustment = arc_size - arc_c;
2029 
2030         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2031                 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2032                 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
2033                 adjustment -= delta;
2034         }
2035 
2036         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2037                 int64_t delta = MIN(adjustment,
2038                     arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2039                 (void) arc_evict(arc_mfu, NULL, delta, FALSE,
2040                     ARC_BUFC_METADATA);
2041         }
2042 
2043         /*
2044          * Adjust ghost lists
2045          */
2046 
2047         adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2048 
2049         if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2050                 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2051                 arc_evict_ghost(arc_mru_ghost, NULL, delta);
2052         }
2053 
2054         adjustment =
2055             arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2056 
2057         if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2058                 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2059                 arc_evict_ghost(arc_mfu_ghost, NULL, delta);
2060         }
2061 }
2062 
2063 static void
2064 arc_do_user_evicts(void)
2065 {
2066         mutex_enter(&arc_eviction_mtx);
2067         while (arc_eviction_list != NULL) {
2068                 arc_buf_t *buf = arc_eviction_list;
2069                 arc_eviction_list = buf->b_next;
2070                 mutex_enter(&buf->b_evict_lock);
2071                 buf->b_hdr = NULL;
2072                 mutex_exit(&buf->b_evict_lock);
2073                 mutex_exit(&arc_eviction_mtx);
2074 
2075                 if (buf->b_efunc != NULL)
2076                         VERIFY(buf->b_efunc(buf) == 0);
2077 
2078                 buf->b_efunc = NULL;
2079                 buf->b_private = NULL;
2080                 kmem_cache_free(buf_cache, buf);
2081                 mutex_enter(&arc_eviction_mtx);
2082         }
2083         mutex_exit(&arc_eviction_mtx);
2084 }
2085 
2086 /*
2087  * Flush all *evictable* data from the cache for the given spa.
2088  * NOTE: this will not touch "active" (i.e. referenced) data.
2089  */
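     /*
      * Passing a NULL spa flushes evictable buffers belonging to every pool;
      * passing a specific spa restricts the flush (matched by load guid) to
      * that pool's buffers.
      */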
2090 void
2091 arc_flush(spa_t *spa)
2092 {
2093         uint64_t guid = 0;
2094 
2095         if (spa)
2096                 guid = spa_load_guid(spa);
2097 
2098         while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
2099                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2100                 if (spa)
2101                         break;
2102         }
2103         while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
2104                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2105                 if (spa)
2106                         break;
2107         }
2108         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
2109                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2110                 if (spa)
2111                         break;
2112         }
2113         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
2114                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2115                 if (spa)
2116                         break;
2117         }
2118 
2119         arc_evict_ghost(arc_mru_ghost, guid, -1);
2120         arc_evict_ghost(arc_mfu_ghost, guid, -1);
2121 
2122         mutex_enter(&arc_reclaim_thr_lock);
2123         arc_do_user_evicts();
2124         mutex_exit(&arc_reclaim_thr_lock);
2125         ASSERT(spa || arc_eviction_list == NULL);
2126 }
2127 
2128 void
2129 arc_shrink(void)
2130 {
2131         if (arc_c > arc_c_min) {
2132                 uint64_t to_free;
2133 
2134 #ifdef _KERNEL
2135                 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
2136 #else
2137                 to_free = arc_c >> arc_shrink_shift;
2138 #endif
2139                 if (arc_c > arc_c_min + to_free)
2140                         atomic_add_64(&arc_c, -to_free);
2141                 else
2142                         arc_c = arc_c_min;
2143 
2144                 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2145                 if (arc_c > arc_size)
2146                         arc_c = MAX(arc_size, arc_c_min);
2147                 if (arc_p > arc_c)
2148                         arc_p = (arc_c >> 1);
2149                 ASSERT(arc_c >= arc_c_min);
2150                 ASSERT((int64_t)arc_p >= 0);
2151         }
2152 
2153         if (arc_size > arc_c)
2154                 arc_adjust();
2155 }
2156 
2157 /*
2158  * Determine if the system is under memory pressure and is asking
2159  * to reclaim memory. A return value of 1 indicates that the system
2160  * is under memory pressure and that the arc should adjust accordingly.
2161  */
2162 static int
2163 arc_reclaim_needed(void)
2164 {
2165         uint64_t extra;
2166 
2167 #ifdef _KERNEL
2168 
2169         if (needfree)
2170                 return (1);
2171 
2172         /*
2173          * take 'desfree' extra pages, so we reclaim sooner, rather than later
2174          */
2175         extra = desfree;
2176 
2177         /*
2178          * check that we're out of range of the pageout scanner.  It starts to
2179  * schedule paging if freemem is less than lotsfree plus needfree.
2180          * lotsfree is the high-water mark for pageout, and needfree is the
2181          * number of needed free pages.  We add extra pages here to make sure
2182          * the scanner doesn't start up while we're freeing memory.
2183          */
2184         if (freemem < lotsfree + needfree + extra)
2185                 return (1);
2186 
2187         /*
2188          * check to make sure that swapfs has enough space so that anon
2189          * reservations can still succeed. anon_resvmem() checks that the
2190  * availrmem is greater than swapfs_minfree plus the number of reserved
2191          * swap pages.  We also add a bit of extra here just to prevent
2192          * circumstances from getting really dire.
2193          */
2194         if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2195                 return (1);
2196 
2197 #if defined(__i386)
2198         /*
2199          * If we're on an i386 platform, it's possible that we'll exhaust the
2200          * kernel heap space before we ever run out of available physical
2201          * memory.  Most checks of the size of the heap_area compare against
2202          * tune.t_minarmem, which is the minimum available real memory that we
2203          * can have in the system.  However, this is generally fixed at 25 pages
2204          * which is so low that it's useless.  In this comparison, we seek to
2205  * calculate the total heap size and reclaim if more than 3/4ths of the
2206  * heap is allocated.  (Or, in the calculation, if less than 1/4th is
2207  * free.)
2208          */
2209         if (vmem_size(heap_arena, VMEM_FREE) <
2210             (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2211                 return (1);
2212 #endif
2213 
2214         /*
2215          * If zio data pages are being allocated out of a separate heap segment,
2216          * then enforce that the size of available vmem for this arena remains
2217          * above about 1/16th free.
2218          *
2219          * Note: The 1/16th arena free requirement was put in place
2220          * to aggressively evict memory from the arc in order to avoid
2221          * memory fragmentation issues.
2222          */
2223         if (zio_arena != NULL &&
2224             vmem_size(zio_arena, VMEM_FREE) <
2225             (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2226                 return (1);
2227 #else
2228         if (spa_get_random(100) == 0)
2229                 return (1);
2230 #endif
2231         return (0);
2232 }
2233 
2234 static void
2235 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2236 {
2237         size_t                  i;
2238         kmem_cache_t            *prev_cache = NULL;
2239         kmem_cache_t            *prev_data_cache = NULL;
2240         extern kmem_cache_t     *zio_buf_cache[];
2241         extern kmem_cache_t     *zio_data_buf_cache[];
2242 
2243 #ifdef _KERNEL
2244         if (arc_meta_used >= arc_meta_limit) {
2245                 /*
2246                  * We are exceeding our meta-data cache limit.
2247                  * Purge some DNLC entries to release holds on meta-data.
2248                  */
2249                 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2250         }
2251 #if defined(__i386)
2252         /*
2253          * Reclaim unused memory from all kmem caches.
2254          */
2255         kmem_reap();
2256 #endif
2257 #endif
2258 
2259         /*
2260          * An aggressive reclamation will shrink the cache size as well as
2261          * reap free buffers from the arc kmem caches.
2262          */
2263         if (strat == ARC_RECLAIM_AGGR)
2264                 arc_shrink();
2265 
2266         for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2267                 if (zio_buf_cache[i] != prev_cache) {
2268                         prev_cache = zio_buf_cache[i];
2269                         kmem_cache_reap_now(zio_buf_cache[i]);
2270                 }
2271                 if (zio_data_buf_cache[i] != prev_data_cache) {
2272                         prev_data_cache = zio_data_buf_cache[i];
2273                         kmem_cache_reap_now(zio_data_buf_cache[i]);
2274                 }
2275         }
2276         kmem_cache_reap_now(buf_cache);
2277         kmem_cache_reap_now(hdr_cache);
2278 
2279         /*
2280          * Ask the vmem arena to reclaim unused memory from its
2281          * quantum caches.
2282          */
2283         if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2284                 vmem_qcache_reap(zio_arena);
2285 }
2286 
2287 static void
2288 arc_reclaim_thread(void)
2289 {
2290         clock_t                 growtime = 0;
2291         arc_reclaim_strategy_t  last_reclaim = ARC_RECLAIM_CONS;
2292         callb_cpr_t             cpr;
2293 
2294         CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2295 
2296         mutex_enter(&arc_reclaim_thr_lock);
2297         while (arc_thread_exit == 0) {
2298                 if (arc_reclaim_needed()) {
2299 
2300                         if (arc_no_grow) {
2301                                 if (last_reclaim == ARC_RECLAIM_CONS) {
2302                                         last_reclaim = ARC_RECLAIM_AGGR;
2303                                 } else {
2304                                         last_reclaim = ARC_RECLAIM_CONS;
2305                                 }
2306                         } else {
2307                                 arc_no_grow = TRUE;
2308                                 last_reclaim = ARC_RECLAIM_AGGR;
2309                                 membar_producer();
2310                         }
2311 
2312                         /* reset the growth delay for every reclaim */
2313                         growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2314 
2315                         arc_kmem_reap_now(last_reclaim);
2316                         arc_warm = B_TRUE;
2317 
2318                 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2319                         arc_no_grow = FALSE;
2320                 }
2321 
2322                 arc_adjust();
2323 
2324                 if (arc_eviction_list != NULL)
2325                         arc_do_user_evicts();
2326 
2327                 /* block until needed, or one second, whichever is shorter */
2328                 CALLB_CPR_SAFE_BEGIN(&cpr);
2329                 (void) cv_timedwait(&arc_reclaim_thr_cv,
2330                     &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2331                 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2332         }
2333 
2334         arc_thread_exit = 0;
2335         cv_broadcast(&arc_reclaim_thr_cv);
2336         CALLB_CPR_EXIT(&cpr);               /* drops arc_reclaim_thr_lock */
2337         thread_exit();
2338 }
2339 
2340 /*
2341  * Adapt arc info given the number of bytes we are trying to add and
2342  * the state that we are coming from.  This function is only called
2343  * when we are adding new content to the cache.
2344  */
2345 static void
2346 arc_adapt(int bytes, arc_state_t *state)
2347 {
2348         int mult;
2349         uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2350 
2351         if (state == arc_l2c_only)
2352                 return;
2353 
2354         ASSERT(bytes > 0);
2355         /*
2356          * Adapt the target size of the MRU list:
2357          *      - if we just hit in the MRU ghost list, then increase
2358          *        the target size of the MRU list.
2359          *      - if we just hit in the MFU ghost list, then increase
2360          *        the target size of the MFU list by decreasing the
2361          *        target size of the MRU list.
2362          */
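             /*
              * For example, if the MFU ghost list is four times the size of
              * the MRU ghost list, a hit in the MRU ghost list below grows
              * arc_p by 4 * bytes (the multiplier is capped at 10 and arc_p
              * at arc_c - arc_p_min); a hit in the MFU ghost list shrinks
              * arc_p symmetrically, never below arc_p_min.
              */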
2363         if (state == arc_mru_ghost) {
2364                 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2365                     1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2366                 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2367 
2368                 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2369         } else if (state == arc_mfu_ghost) {
2370                 uint64_t delta;
2371 
2372                 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2373                     1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2374                 mult = MIN(mult, 10);
2375 
2376                 delta = MIN(bytes * mult, arc_p);
2377                 arc_p = MAX(arc_p_min, arc_p - delta);
2378         }
2379         ASSERT((int64_t)arc_p >= 0);
2380 
2381         if (arc_reclaim_needed()) {
2382                 cv_signal(&arc_reclaim_thr_cv);
2383                 return;
2384         }
2385 
2386         if (arc_no_grow)
2387                 return;
2388 
2389         if (arc_c >= arc_c_max)
2390                 return;
2391 
2392         /*
2393          * If we're within (2 * maxblocksize) bytes of the target
2394          * cache size, increment the target cache size
2395          */
2396         if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2397                 atomic_add_64(&arc_c, (int64_t)bytes);
2398                 if (arc_c > arc_c_max)
2399                         arc_c = arc_c_max;
2400                 else if (state == arc_anon)
2401                         atomic_add_64(&arc_p, (int64_t)bytes);
2402                 if (arc_p > arc_c)
2403                         arc_p = arc_c;
2404         }
2405         ASSERT((int64_t)arc_p >= 0);
2406 }
2407 
2408 /*
2409  * Check if the cache has reached its limits and eviction is required
2410  * prior to insert.
2411  */
2412 static int
2413 arc_evict_needed(arc_buf_contents_t type)
2414 {
2415         if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2416                 return (1);
2417 
2418         if (arc_reclaim_needed())
2419                 return (1);
2420 
2421         return (arc_size > arc_c);
2422 }
2423 
2424 /*
2425  * The buffer, supplied as the first argument, needs a data block.
2426  * So, if we are at cache max, determine which cache should be victimized.
2427  * We have the following cases:
2428  *
2429  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2430  * In this situation if we're out of space, but the resident size of the MFU is
2431  * under the limit, victimize the MFU cache to satisfy this insertion request.
2432  *
2433  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2434  * Here, we've used up all of the available space for the MRU, so we need to
2435  * evict from our own cache instead.  Evict from the set of resident MRU
2436  * entries.
2437  *
2438  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2439  * c minus p represents the MFU space in the cache, since p is the size of the
2440  * cache that is dedicated to the MRU.  In this situation there's still space on
2441  * the MFU side, so the MRU side needs to be victimized.
2442  *
2443  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2444  * MFU's resident set is consuming more space than it has been allotted.  In
2445  * this situation, we must victimize our own cache, the MFU, for this insertion.
2446  */
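     /*
      * A concrete instance of case 1 (illustrative numbers): with
      * arc_p = 6GB and arc_anon + arc_mru totalling 4GB, a new MRU insert
      * that finds enough evictable MFU data of the right type will recycle
      * a block from the MFU list rather than allocating a fresh one.
      */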
2447 static void
2448 arc_get_data_buf(arc_buf_t *buf)
2449 {
2450         arc_state_t             *state = buf->b_hdr->b_state;
2451         uint64_t                size = buf->b_hdr->b_size;
2452         arc_buf_contents_t      type = buf->b_hdr->b_type;
2453 
2454         arc_adapt(size, state);
2455 
2456         /*
2457          * We have not yet reached cache maximum size,
2458          * just allocate a new buffer.
2459          */
2460         if (!arc_evict_needed(type)) {
2461                 if (type == ARC_BUFC_METADATA) {
2462                         buf->b_data = zio_buf_alloc(size);
2463                         arc_space_consume(size, ARC_SPACE_DATA);
2464                 } else {
2465                         ASSERT(type == ARC_BUFC_DATA);
2466                         buf->b_data = zio_data_buf_alloc(size);
2467                         ARCSTAT_INCR(arcstat_data_size, size);
2468                         atomic_add_64(&arc_size, size);
2469                 }
2470                 goto out;
2471         }
2472 
2473         /*
2474          * If we are prefetching from the mfu ghost list, this buffer
2475          * will end up on the mru list; so steal space from there.
2476          */
2477         if (state == arc_mfu_ghost)
2478                 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2479         else if (state == arc_mru_ghost)
2480                 state = arc_mru;
2481 
2482         if (state == arc_mru || state == arc_anon) {
2483                 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2484                 state = (arc_mfu->arcs_lsize[type] >= size &&
2485                     arc_p > mru_used) ? arc_mfu : arc_mru;
2486         } else {
2487                 /* MFU cases */
2488                 uint64_t mfu_space = arc_c - arc_p;
2489                 state =  (arc_mru->arcs_lsize[type] >= size &&
2490                     mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2491         }
2492         if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2493                 if (type == ARC_BUFC_METADATA) {
2494                         buf->b_data = zio_buf_alloc(size);
2495                         arc_space_consume(size, ARC_SPACE_DATA);
2496                 } else {
2497                         ASSERT(type == ARC_BUFC_DATA);
2498                         buf->b_data = zio_data_buf_alloc(size);
2499                         ARCSTAT_INCR(arcstat_data_size, size);
2500                         atomic_add_64(&arc_size, size);
2501                 }
2502                 ARCSTAT_BUMP(arcstat_recycle_miss);
2503         }
2504         ASSERT(buf->b_data != NULL);
2505 out:
2506         /*
2507          * Update the state size.  Note that ghost states have a
2508          * "ghost size" and so don't need to be updated.
2509          */
2510         if (!GHOST_STATE(buf->b_hdr->b_state)) {
2511                 arc_buf_hdr_t *hdr = buf->b_hdr;
2512 
2513                 atomic_add_64(&hdr->b_state->arcs_size, size);
2514                 if (list_link_active(&hdr->b_arc_node)) {
2515                         ASSERT(refcount_is_zero(&hdr->b_refcnt));
2516                         atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2517                 }
2518                 /*
2519                  * If we are growing the cache, and we are adding anonymous
2520                  * data, and we have outgrown arc_p, update arc_p
2521                  */
2522                 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2523                     arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2524                         arc_p = MIN(arc_c, arc_p + size);
2525         }
2526 }
2527 
2528 /*
2529  * This routine is called whenever a buffer is accessed.
2530  * NOTE: the hash lock is dropped in this function.
2531  */
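     /*
      * Rough summary of the transitions handled below: anonymous buffers
      * move to the MRU on first access; an MRU buffer referenced again after
      * ARC_MINTIME moves to the MFU; ghost-list hits bring the buffer back
      * into the MRU or MFU (prefetches favor the MRU); and l2c_only headers
      * re-enter through the MFU.
      */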
2532 static void
2533 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2534 {
2535         clock_t now;
2536 
2537         ASSERT(MUTEX_HELD(hash_lock));
2538 
2539         if (buf->b_state == arc_anon) {
2540                 /*
2541                  * This buffer is not in the cache, and does not
2542                  * appear in our "ghost" list.  Add the new buffer
2543                  * to the MRU state.
2544                  */
2545 
2546                 ASSERT(buf->b_arc_access == 0);
2547                 buf->b_arc_access = ddi_get_lbolt();
2548                 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2549                 arc_change_state(arc_mru, buf, hash_lock);
2550 
2551         } else if (buf->b_state == arc_mru) {
2552                 now = ddi_get_lbolt();
2553 
2554                 /*
2555                  * If this buffer is here because of a prefetch, then either:
2556                  * - clear the flag if this is a "referencing" read
2557                  *   (any subsequent access will bump this into the MFU state).
2558                  * or
2559                  * - move the buffer to the head of the list if this is
2560                  *   another prefetch (to make it less likely to be evicted).
2561                  */
2562                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2563                         if (refcount_count(&buf->b_refcnt) == 0) {
2564                                 ASSERT(list_link_active(&buf->b_arc_node));
2565                         } else {
2566                                 buf->b_flags &= ~ARC_PREFETCH;
2567                                 ARCSTAT_BUMP(arcstat_mru_hits);
2568                         }
2569                         buf->b_arc_access = now;
2570                         return;
2571                 }
2572 
2573                 /*
2574                  * This buffer has been "accessed" only once so far,
2575                  * but it is still in the cache.  If it has aged past
2576                  * ARC_MINTIME, move it to the MFU state.
2577                  */
2578                 if (now > buf->b_arc_access + ARC_MINTIME) {
2579                         /*
2580                          * More than 125ms have passed since we
2581                          * instantiated this buffer.  Move it to the
2582                          * most frequently used state.
2583                          */
2584                         buf->b_arc_access = now;
2585                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2586                         arc_change_state(arc_mfu, buf, hash_lock);
2587                 }
2588                 ARCSTAT_BUMP(arcstat_mru_hits);
2589         } else if (buf->b_state == arc_mru_ghost) {
2590                 arc_state_t     *new_state;
2591                 /*
2592                  * This buffer has been "accessed" recently, but
2593                  * was evicted from the cache.  Move it to the
2594                  * MFU state.
2595                  */
2596 
2597                 if (buf->b_flags & ARC_PREFETCH) {
2598                         new_state = arc_mru;
2599                         if (refcount_count(&buf->b_refcnt) > 0)
2600                                 buf->b_flags &= ~ARC_PREFETCH;
2601                         DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2602                 } else {
2603                         new_state = arc_mfu;
2604                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2605                 }
2606 
2607                 buf->b_arc_access = ddi_get_lbolt();
2608                 arc_change_state(new_state, buf, hash_lock);
2609 
2610                 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2611         } else if (buf->b_state == arc_mfu) {
2612                 /*
2613                  * This buffer has been accessed more than once and is
2614                  * still in the cache.  Keep it in the MFU state.
2615                  *
2616                  * NOTE: an add_reference() that occurred when we did
2617                  * the arc_read() will have kicked this off the list.
2618                  * If it was a prefetch, we will explicitly move it to
2619                  * the head of the list now.
2620                  */
2621                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2622                         ASSERT(refcount_count(&buf->b_refcnt) == 0);
2623                         ASSERT(list_link_active(&buf->b_arc_node));
2624                 }
2625                 ARCSTAT_BUMP(arcstat_mfu_hits);
2626                 buf->b_arc_access = ddi_get_lbolt();
2627         } else if (buf->b_state == arc_mfu_ghost) {
2628                 arc_state_t     *new_state = arc_mfu;
2629                 /*
2630                  * This buffer has been accessed more than once but has
2631                  * been evicted from the cache.  Move it back to the
2632                  * MFU state.
2633                  */
2634 
2635                 if (buf->b_flags & ARC_PREFETCH) {
2636                         /*
2637                          * This is a prefetch access...
2638                          * move this block back to the MRU state.
2639                          */
2640                         ASSERT0(refcount_count(&buf->b_refcnt));
2641                         new_state = arc_mru;
2642                 }
2643 
2644                 buf->b_arc_access = ddi_get_lbolt();
2645                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2646                 arc_change_state(new_state, buf, hash_lock);
2647 
2648                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2649         } else if (buf->b_state == arc_l2c_only) {
2650                 /*
2651                  * This buffer is on the 2nd Level ARC.
2652                  */
2653 
2654                 buf->b_arc_access = ddi_get_lbolt();
2655                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2656                 arc_change_state(arc_mfu, buf, hash_lock);
2657         } else {
2658                 ASSERT(!"invalid arc state");
2659         }
2660 }
2661 
2662 /* a generic arc_done_func_t which you can use */
2663 /* ARGSUSED */
2664 void
2665 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2666 {
2667         if (zio == NULL || zio->io_error == 0)
2668                 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2669         VERIFY(arc_buf_remove_ref(buf, arg));
2670 }
2671 
2672 /* a generic arc_done_func_t */
2673 void
2674 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2675 {
2676         arc_buf_t **bufp = arg;
2677         if (zio && zio->io_error) {
2678                 VERIFY(arc_buf_remove_ref(buf, arg));
2679                 *bufp = NULL;
2680         } else {
2681                 *bufp = buf;
2682                 ASSERT(buf->b_data);
2683         }
2684 }
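     /*
      * Both routines above are intended as `done' callbacks for arc_read():
      * arc_bcopy_func() copies the data into a caller-supplied buffer and
      * drops the arc buffer, while arc_getbuf_func() hands the arc buffer
      * itself back through the arc_buf_t ** argument (NULL on error).
      */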
2685 
2686 static void
2687 arc_read_done(zio_t *zio)
2688 {
2689         arc_buf_hdr_t   *hdr, *found;
2690         arc_buf_t       *buf;
2691         arc_buf_t       *abuf;  /* buffer we're assigning to callback */
2692         kmutex_t        *hash_lock;
2693         arc_callback_t  *callback_list, *acb;
2694         int             freeable = FALSE;
2695 
2696         buf = zio->io_private;
2697         hdr = buf->b_hdr;
2698 
2699         /*
2700          * The hdr was inserted into the hash table and removed from lists
2701          * prior to starting I/O.  We should find this header, since
2702          * it's in the hash table, and it should be legit since it's
2703          * not possible to evict it during the I/O.  The only possible
2704          * reason for it not to be found is that it was freed during the
2705          * read.
2706          */
2707         found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2708             &hash_lock);
2709 
2710         ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2711             (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2712             (found == hdr && HDR_L2_READING(hdr)));
2713 
2714         hdr->b_flags &= ~ARC_L2_EVICTED;
2715         if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2716                 hdr->b_flags &= ~ARC_L2CACHE;
2717 
2718         /* byteswap if necessary */
2719         callback_list = hdr->b_acb;
2720         ASSERT(callback_list != NULL);
2721         if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2722                 dmu_object_byteswap_t bswap =
2723                     DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2724                 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2725                     byteswap_uint64_array :
2726                     dmu_ot_byteswap[bswap].ob_func;
2727                 func(buf->b_data, hdr->b_size);
2728         }
2729 
2730         arc_cksum_compute(buf, B_FALSE);
2731         arc_buf_watch(buf);
2732 
2733         if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2734                 /*
2735                  * Only call arc_access on anonymous buffers.  This is because
2736                  * if we've issued an I/O for an evicted buffer, we've already
2737                  * called arc_access (to prevent any simultaneous readers from
2738                  * getting confused).
2739                  */
2740                 arc_access(hdr, hash_lock);
2741         }
2742 
2743         /* create copies of the data buffer for the callers */
2744         abuf = buf;
2745         for (acb = callback_list; acb; acb = acb->acb_next) {
2746                 if (acb->acb_done) {
2747                         if (abuf == NULL) {
2748                                 ARCSTAT_BUMP(arcstat_duplicate_reads);
2749                                 abuf = arc_buf_clone(buf);
2750                         }
2751                         acb->acb_buf = abuf;
2752                         abuf = NULL;
2753                 }
2754         }
2755         hdr->b_acb = NULL;
2756         hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2757         ASSERT(!HDR_BUF_AVAILABLE(hdr));
2758         if (abuf == buf) {
2759                 ASSERT(buf->b_efunc == NULL);
2760                 ASSERT(hdr->b_datacnt == 1);
2761                 hdr->b_flags |= ARC_BUF_AVAILABLE;
2762         }
2763 
2764         ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2765 
2766         if (zio->io_error != 0) {
2767                 hdr->b_flags |= ARC_IO_ERROR;
2768                 if (hdr->b_state != arc_anon)
2769                         arc_change_state(arc_anon, hdr, hash_lock);
2770                 if (HDR_IN_HASH_TABLE(hdr))
2771                         buf_hash_remove(hdr);
2772                 freeable = refcount_is_zero(&hdr->b_refcnt);
2773         }
2774 
2775         /*
2776          * Broadcast before we drop the hash_lock to avoid the possibility
2777          * that the hdr (and hence the cv) might be freed before we get to
2778          * the cv_broadcast().
2779          */
2780         cv_broadcast(&hdr->b_cv);
2781 
2782         if (hash_lock) {
2783                 mutex_exit(hash_lock);
2784         } else {
2785                 /*
2786                  * This block was freed while we waited for the read to
2787                  * complete.  It has been removed from the hash table and
2788                  * moved to the anonymous state (so that it won't show up
2789                  * in the cache).
2790                  */
2791                 ASSERT3P(hdr->b_state, ==, arc_anon);
2792                 freeable = refcount_is_zero(&hdr->b_refcnt);
2793         }
2794 
2795         /* execute each callback and free its structure */
2796         while ((acb = callback_list) != NULL) {
2797                 if (acb->acb_done)
2798                         acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2799 
2800                 if (acb->acb_zio_dummy != NULL) {
2801                         acb->acb_zio_dummy->io_error = zio->io_error;
2802                         zio_nowait(acb->acb_zio_dummy);
2803                 }
2804 
2805                 callback_list = acb->acb_next;
2806                 kmem_free(acb, sizeof (arc_callback_t));
2807         }
2808 
2809         if (freeable)
2810                 arc_hdr_destroy(hdr);
2811 }
2812 
2813 /*
2814  * "Read" the block at the specified DVA (in bp) via the
2815  * cache.  If the block is found in the cache, invoke the provided
2816  * callback immediately and return.  Note that the `zio' parameter
2817  * in the callback will be NULL in this case, since no I/O was
2818  * required.  If the block is not in the cache, pass the read request
2819  * on to the spa with a substitute callback function, so that the
2820  * requested block will be added to the cache.
2821  *
2822  * If a read request arrives for a block that has a read in-progress,
2823  * either wait for the in-progress read to complete (and return the
2824  * results); or, if this is a read with a "done" func, add a record
2825  * to the read to invoke the "done" func when the read completes,
2826  * and return; or just return.
2827  *
2828  * arc_read_done() will invoke all the requested "done" functions
2829  * for readers of this block.
2830  */
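/*
 * As a rough usage sketch (not a verbatim caller from this file, and
 * assuming the usual zio priority/flag names), a synchronous read that
 * hands the buffer back through arc_getbuf_func() might look like:
 *
 *      uint32_t aflags = ARC_WAIT;
 *      arc_buf_t *abuf = NULL;
 *      int error;
 *
 *      error = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *          ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * On success arc_getbuf_func() stores the buffer in abuf, which the caller
 * must later release; on error abuf is left NULL.
 */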
2831 int
2832 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
2833     void *private, int priority, int zio_flags, uint32_t *arc_flags,
2834     const zbookmark_t *zb)
2835 {
2836         arc_buf_hdr_t *hdr;
2837         arc_buf_t *buf = NULL;
2838         kmutex_t *hash_lock;
2839         zio_t *rzio;
2840         uint64_t guid = spa_load_guid(spa);
2841 
2842 top:
2843         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2844             &hash_lock);
2845         if (hdr && hdr->b_datacnt > 0) {
2846 
2847                 *arc_flags |= ARC_CACHED;
2848 
2849                 if (HDR_IO_IN_PROGRESS(hdr)) {
2850 
2851                         if (*arc_flags & ARC_WAIT) {
2852                                 cv_wait(&hdr->b_cv, hash_lock);
2853                                 mutex_exit(hash_lock);
2854                                 goto top;
2855                         }
2856                         ASSERT(*arc_flags & ARC_NOWAIT);
2857 
2858                         if (done) {
2859                                 arc_callback_t  *acb = NULL;
2860 
2861                                 acb = kmem_zalloc(sizeof (arc_callback_t),
2862                                     KM_SLEEP);
2863                                 acb->acb_done = done;
2864                                 acb->acb_private = private;
2865                                 if (pio != NULL)
2866                                         acb->acb_zio_dummy = zio_null(pio,
2867                                             spa, NULL, NULL, NULL, zio_flags);
2868 
2869                                 ASSERT(acb->acb_done != NULL);
2870                                 acb->acb_next = hdr->b_acb;
2871                                 hdr->b_acb = acb;
2872                                 add_reference(hdr, hash_lock, private);
2873                                 mutex_exit(hash_lock);
2874                                 return (0);
2875                         }
2876                         mutex_exit(hash_lock);
2877                         return (0);
2878                 }
2879 
2880                 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2881 
2882                 if (done) {
2883                         add_reference(hdr, hash_lock, private);
2884                         /*
2885                          * If this block is already in use, create a new
2886                          * copy of the data so that we will be guaranteed
2887                          * that arc_release() will always succeed.
2888                          */
2889                         buf = hdr->b_buf;
2890                         ASSERT(buf);
2891                         ASSERT(buf->b_data);
2892                         if (HDR_BUF_AVAILABLE(hdr)) {
2893                                 ASSERT(buf->b_efunc == NULL);
2894                                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2895                         } else {
2896                                 buf = arc_buf_clone(buf);
2897                         }
2898 
2899                 } else if (*arc_flags & ARC_PREFETCH &&
2900                     refcount_count(&hdr->b_refcnt) == 0) {
2901                         hdr->b_flags |= ARC_PREFETCH;
2902                 }
2903                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2904                 arc_access(hdr, hash_lock);
2905                 if (*arc_flags & ARC_L2CACHE)
2906                         hdr->b_flags |= ARC_L2CACHE;
2907                 if (*arc_flags & ARC_L2COMPRESS)
2908                         hdr->b_flags |= ARC_L2COMPRESS;
2909                 mutex_exit(hash_lock);
2910                 ARCSTAT_BUMP(arcstat_hits);
2911                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2912                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2913                     data, metadata, hits);
2914 
2915                 if (done)
2916                         done(NULL, buf, private);
2917         } else {
2918                 uint64_t size = BP_GET_LSIZE(bp);
2919                 arc_callback_t  *acb;
2920                 vdev_t *vd = NULL;
2921                 uint64_t addr = 0;
2922                 boolean_t devw = B_FALSE;
2923 
2924                 if (hdr == NULL) {
2925                         /* this block is not in the cache */
2926                         arc_buf_hdr_t   *exists;
2927                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2928                         buf = arc_buf_alloc(spa, size, private, type);
2929                         hdr = buf->b_hdr;
2930                         hdr->b_dva = *BP_IDENTITY(bp);
2931                         hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2932                         hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2933                         exists = buf_hash_insert(hdr, &hash_lock);
2934                         if (exists) {
2935                                 /* somebody beat us to the hash insert */
2936                                 mutex_exit(hash_lock);
2937                                 buf_discard_identity(hdr);
2938                                 (void) arc_buf_remove_ref(buf, private);
2939                                 goto top; /* restart the IO request */
2940                         }
2941                         /* if this is a prefetch, we don't have a reference */
2942                         if (*arc_flags & ARC_PREFETCH) {
2943                                 (void) remove_reference(hdr, hash_lock,
2944                                     private);
2945                                 hdr->b_flags |= ARC_PREFETCH;
2946                         }
2947                         if (*arc_flags & ARC_L2CACHE)
2948                                 hdr->b_flags |= ARC_L2CACHE;
2949                         if (*arc_flags & ARC_L2COMPRESS)
2950                                 hdr->b_flags |= ARC_L2COMPRESS;
2951                         if (BP_GET_LEVEL(bp) > 0)
2952                                 hdr->b_flags |= ARC_INDIRECT;
2953                 } else {
2954                         /* this block is in the ghost cache */
2955                         ASSERT(GHOST_STATE(hdr->b_state));
2956                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2957                         ASSERT0(refcount_count(&hdr->b_refcnt));
2958                         ASSERT(hdr->b_buf == NULL);
2959 
2960                         /* if this is a prefetch, we don't have a reference */
2961                         if (*arc_flags & ARC_PREFETCH)
2962                                 hdr->b_flags |= ARC_PREFETCH;
2963                         else
2964                                 add_reference(hdr, hash_lock, private);
2965                         if (*arc_flags & ARC_L2CACHE)
2966                                 hdr->b_flags |= ARC_L2CACHE;
2967                         if (*arc_flags & ARC_L2COMPRESS)
2968                                 hdr->b_flags |= ARC_L2COMPRESS;
2969                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2970                         buf->b_hdr = hdr;
2971                         buf->b_data = NULL;
2972                         buf->b_efunc = NULL;
2973                         buf->b_private = NULL;
2974                         buf->b_next = NULL;
2975                         hdr->b_buf = buf;
2976                         ASSERT(hdr->b_datacnt == 0);
2977                         hdr->b_datacnt = 1;
2978                         arc_get_data_buf(buf);
2979                         arc_access(hdr, hash_lock);
2980                 }
2981 
2982                 ASSERT(!GHOST_STATE(hdr->b_state));
2983 
2984                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2985                 acb->acb_done = done;
2986                 acb->acb_private = private;
2987 
2988                 ASSERT(hdr->b_acb == NULL);
2989                 hdr->b_acb = acb;
2990                 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2991 
2992                 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2993                     (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2994                         devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2995                         addr = hdr->b_l2hdr->b_daddr;
2996                         /*
2997                          * Lock out device removal.
2998                          */
2999                         if (vdev_is_dead(vd) ||
3000                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3001                                 vd = NULL;
3002                 }
3003 
3004                 mutex_exit(hash_lock);
3005 
3006                 /*
3007                  * At this point, we have a level 1 cache miss.  Try again in
3008                  * L2ARC if possible.
3009                  */
3010                 ASSERT3U(hdr->b_size, ==, size);
3011                 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3012                     uint64_t, size, zbookmark_t *, zb);
3013                 ARCSTAT_BUMP(arcstat_misses);
3014                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3015                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3016                     data, metadata, misses);
3017 
3018                 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3019                         /*
3020                          * Read from the L2ARC if the following are true:
3021                          * 1. The L2ARC vdev was previously cached.
3022                          * 2. This buffer still has L2ARC metadata.
3023                          * 3. This buffer isn't currently being written to the L2ARC.
3024                          * 4. The L2ARC entry wasn't evicted, which may
3025                          *    also have invalidated the vdev.
3026                          * 5. Either this isn't a prefetch or l2arc_noprefetch is unset.
3027                          */
3028                         if (hdr->b_l2hdr != NULL &&
3029                             !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3030                             !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3031                                 l2arc_read_callback_t *cb;
3032 
3033                                 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3034                                 ARCSTAT_BUMP(arcstat_l2_hits);
3035 
3036                                 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3037                                     KM_SLEEP);
3038                                 cb->l2rcb_buf = buf;
3039                                 cb->l2rcb_spa = spa;
3040                                 cb->l2rcb_bp = *bp;
3041                                 cb->l2rcb_zb = *zb;
3042                                 cb->l2rcb_flags = zio_flags;
3043                                 cb->l2rcb_compress = hdr->b_l2hdr->b_compress;
3044 
3045                                 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3046                                     addr + size < vd->vdev_psize -
3047                                     VDEV_LABEL_END_SIZE);
3048 
3049                                 /*
3050                                  * l2arc read.  The SCL_L2ARC lock will be
3051                                  * released by l2arc_read_done().
3052                                  * Issue a null zio if the underlying buffer
3053                                  * was squashed to zero size by compression.
3054                                  */
3055                                 if (hdr->b_l2hdr->b_compress ==
3056                                     ZIO_COMPRESS_EMPTY) {
3057                                         rzio = zio_null(pio, spa, vd,
3058                                             l2arc_read_done, cb,
3059                                             zio_flags | ZIO_FLAG_DONT_CACHE |
3060                                             ZIO_FLAG_CANFAIL |
3061                                             ZIO_FLAG_DONT_PROPAGATE |
3062                                             ZIO_FLAG_DONT_RETRY);
3063                                 } else {
3064                                         rzio = zio_read_phys(pio, vd, addr,
3065                                             hdr->b_l2hdr->b_asize,
3066                                             buf->b_data, ZIO_CHECKSUM_OFF,
3067                                             l2arc_read_done, cb, priority,
3068                                             zio_flags | ZIO_FLAG_DONT_CACHE |
3069                                             ZIO_FLAG_CANFAIL |
3070                                             ZIO_FLAG_DONT_PROPAGATE |
3071                                             ZIO_FLAG_DONT_RETRY, B_FALSE);
3072                                 }
3073                                 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3074                                     zio_t *, rzio);
3075                                 ARCSTAT_INCR(arcstat_l2_read_bytes,
3076                                     hdr->b_l2hdr->b_asize);
3077 
3078                                 if (*arc_flags & ARC_NOWAIT) {
3079                                         zio_nowait(rzio);
3080                                         return (0);
3081                                 }
3082 
3083                                 ASSERT(*arc_flags & ARC_WAIT);
3084                                 if (zio_wait(rzio) == 0)
3085                                         return (0);
3086 
3087                                 /* l2arc read error; fall through to zio_read() below */
3088                         } else {
3089                                 DTRACE_PROBE1(l2arc__miss,
3090                                     arc_buf_hdr_t *, hdr);
3091                                 ARCSTAT_BUMP(arcstat_l2_misses);
3092                                 if (HDR_L2_WRITING(hdr))
3093                                         ARCSTAT_BUMP(arcstat_l2_rw_clash);
3094                                 spa_config_exit(spa, SCL_L2ARC, vd);
3095                         }
3096                 } else {
3097                         if (vd != NULL)
3098                                 spa_config_exit(spa, SCL_L2ARC, vd);
3099                         if (l2arc_ndev != 0) {
3100                                 DTRACE_PROBE1(l2arc__miss,
3101                                     arc_buf_hdr_t *, hdr);
3102                                 ARCSTAT_BUMP(arcstat_l2_misses);
3103                         }
3104                 }
3105 
3106                 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3107                     arc_read_done, buf, priority, zio_flags, zb);
3108 
3109                 if (*arc_flags & ARC_WAIT)
3110                         return (zio_wait(rzio));
3111 
3112                 ASSERT(*arc_flags & ARC_NOWAIT);
3113                 zio_nowait(rzio);
3114         }
3115         return (0);
3116 }
3117 
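/*
 * Register an eviction callback (and its private data) on a buffer.  The
 * callback is invoked when the ARC wants to evict the buffer; see
 * arc_buf_evict().
 */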
3118 void
3119 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3120 {
3121         ASSERT(buf->b_hdr != NULL);
3122         ASSERT(buf->b_hdr->b_state != arc_anon);
3123         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3124         ASSERT(buf->b_efunc == NULL);
3125         ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3126 
3127         buf->b_efunc = func;
3128         buf->b_private = private;
3129 }
3130 
3131 /*
3132  * Notify the arc that a block was freed, and thus will never be used again.
3133  */
3134 void
3135 arc_freed(spa_t *spa, const blkptr_t *bp)
3136 {
3137         arc_buf_hdr_t *hdr;
3138         kmutex_t *hash_lock;
3139         uint64_t guid = spa_load_guid(spa);
3140 
3141         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
3142             &hash_lock);
3143         if (hdr == NULL)
3144                 return;
3145         if (HDR_BUF_AVAILABLE(hdr)) {
3146                 arc_buf_t *buf = hdr->b_buf;
3147                 add_reference(hdr, hash_lock, FTAG);
3148                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3149                 mutex_exit(hash_lock);
3150 
3151                 arc_release(buf, FTAG);
3152                 (void) arc_buf_remove_ref(buf, FTAG);
3153         } else {
3154                 mutex_exit(hash_lock);
3155         }
3157 }
3158 
3159 /*
3160  * This is used by the DMU to let the ARC know that a buffer is
3161  * being evicted, so the ARC should clean up.  If this arc buf
3162  * is not yet in the evicted state, it will be put there.
3163  */
3164 int
3165 arc_buf_evict(arc_buf_t *buf)
3166 {
3167         arc_buf_hdr_t *hdr;
3168         kmutex_t *hash_lock;
3169         arc_buf_t **bufp;
3170 
3171         mutex_enter(&buf->b_evict_lock);
3172         hdr = buf->b_hdr;
3173         if (hdr == NULL) {
3174                 /*
3175                  * We are in arc_do_user_evicts().
3176                  */
3177                 ASSERT(buf->b_data == NULL);
3178                 mutex_exit(&buf->b_evict_lock);
3179                 return (0);
3180         } else if (buf->b_data == NULL) {
3181                 arc_buf_t copy = *buf; /* structure assignment */
3182                 /*
3183                  * We are on the eviction list; process this buffer now
3184                  * but let arc_do_user_evicts() do the reaping.
3185                  */
3186                 buf->b_efunc = NULL;
3187                 mutex_exit(&buf->b_evict_lock);
3188                 VERIFY(copy.b_efunc(&copy) == 0);
3189                 return (1);
3190         }
3191         hash_lock = HDR_LOCK(hdr);
3192         mutex_enter(hash_lock);
3193         hdr = buf->b_hdr;
3194         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3195 
3196         ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3197         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3198 
3199         /*
3200          * Pull this buffer off of the hdr
3201          */
3202         bufp = &hdr->b_buf;
3203         while (*bufp != buf)
3204                 bufp = &(*bufp)->b_next;
3205         *bufp = buf->b_next;
3206 
3207         ASSERT(buf->b_data != NULL);
3208         arc_buf_destroy(buf, FALSE, FALSE);
3209 
3210         if (hdr->b_datacnt == 0) {
3211                 arc_state_t *old_state = hdr->b_state;
3212                 arc_state_t *evicted_state;
3213 
3214                 ASSERT(hdr->b_buf == NULL);
3215                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3216 
3217                 evicted_state =
3218                     (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3219 
3220                 mutex_enter(&old_state->arcs_mtx);
3221                 mutex_enter(&evicted_state->arcs_mtx);
3222 
3223                 arc_change_state(evicted_state, hdr, hash_lock);
3224                 ASSERT(HDR_IN_HASH_TABLE(hdr));
3225                 hdr->b_flags |= ARC_IN_HASH_TABLE;
3226                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3227 
3228                 mutex_exit(&evicted_state->arcs_mtx);
3229                 mutex_exit(&old_state->arcs_mtx);
3230         }
3231         mutex_exit(hash_lock);
3232         mutex_exit(&buf->b_evict_lock);
3233 
3234         VERIFY(buf->b_efunc(buf) == 0);
3235         buf->b_efunc = NULL;
3236         buf->b_private = NULL;
3237         buf->b_hdr = NULL;
3238         buf->b_next = NULL;
3239         kmem_cache_free(buf_cache, buf);
3240         return (1);
3241 }
3242 
3243 /*
3244  * Release this buffer from the cache, making it an anonymous buffer.  This
3245  * must be done after a read and prior to modifying the buffer contents.
3246  * If the buffer has more than one reference, we must make
3247  * a new hdr for the buffer.
3248  */
3249 void
3250 arc_release(arc_buf_t *buf, void *tag)
3251 {
3252         arc_buf_hdr_t *hdr;
3253         kmutex_t *hash_lock = NULL;
3254         l2arc_buf_hdr_t *l2hdr;
3255         uint64_t buf_size;
3256 
3257         /*
3258          * It would be nice to assert that if it's DMU metadata (level >
3259          * 0 || it's the dnode file), then it must be syncing context.
3260          * But we don't know that information at this level.
3261          */
3262 
3263         mutex_enter(&buf->b_evict_lock);
3264         hdr = buf->b_hdr;
3265 
3266         /* this buffer is not on any list */
3267         ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3268 
3269         if (hdr->b_state == arc_anon) {
3270                 /* this buffer is already released */
3271                 ASSERT(buf->b_efunc == NULL);
3272         } else {
3273                 hash_lock = HDR_LOCK(hdr);
3274                 mutex_enter(hash_lock);
3275                 hdr = buf->b_hdr;
3276                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3277         }
3278 
3279         l2hdr = hdr->b_l2hdr;
3280         if (l2hdr) {
3281                 mutex_enter(&l2arc_buflist_mtx);
3282                 hdr->b_l2hdr = NULL;
3283         }
3284         buf_size = hdr->b_size;
3285 
3286         /*
3287          * Do we have more than one buf?
3288          */
3289         if (hdr->b_datacnt > 1) {
3290                 arc_buf_hdr_t *nhdr;
3291                 arc_buf_t **bufp;
3292                 uint64_t blksz = hdr->b_size;
3293                 uint64_t spa = hdr->b_spa;
3294                 arc_buf_contents_t type = hdr->b_type;
3295                 uint32_t flags = hdr->b_flags;
3296 
3297                 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3298                 /*
3299                  * Pull the data off of this hdr and attach it to
3300                  * a new anonymous hdr.
3301                  */
3302                 (void) remove_reference(hdr, hash_lock, tag);
3303                 bufp = &hdr->b_buf;
3304                 while (*bufp != buf)
3305                         bufp = &(*bufp)->b_next;
3306                 *bufp = buf->b_next;
3307                 buf->b_next = NULL;
3308 
3309                 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3310                 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3311                 if (refcount_is_zero(&hdr->b_refcnt)) {
3312                         uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3313                         ASSERT3U(*size, >=, hdr->b_size);
3314                         atomic_add_64(size, -hdr->b_size);
3315                 }
3316 
3317                 /*
3318                  * We're releasing a duplicate user data buffer; update
3319                  * our statistics accordingly.
3320                  */
3321                 if (hdr->b_type == ARC_BUFC_DATA) {
3322                         ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3323                         ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3324                             -hdr->b_size);
3325                 }
3326                 hdr->b_datacnt -= 1;
3327                 arc_cksum_verify(buf);
3328                 arc_buf_unwatch(buf);
3329 
3330                 mutex_exit(hash_lock);
3331 
3332                 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3333                 nhdr->b_size = blksz;
3334                 nhdr->b_spa = spa;
3335                 nhdr->b_type = type;
3336                 nhdr->b_buf = buf;
3337                 nhdr->b_state = arc_anon;
3338                 nhdr->b_arc_access = 0;
3339                 nhdr->b_flags = flags & ARC_L2_WRITING;
3340                 nhdr->b_l2hdr = NULL;
3341                 nhdr->b_datacnt = 1;
3342                 nhdr->b_freeze_cksum = NULL;
3343                 (void) refcount_add(&nhdr->b_refcnt, tag);
3344                 buf->b_hdr = nhdr;
3345                 mutex_exit(&buf->b_evict_lock);
3346                 atomic_add_64(&arc_anon->arcs_size, blksz);
3347         } else {
3348                 mutex_exit(&buf->b_evict_lock);
3349                 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3350                 ASSERT(!list_link_active(&hdr->b_arc_node));
3351                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3352                 if (hdr->b_state != arc_anon)
3353                         arc_change_state(arc_anon, hdr, hash_lock);
3354                 hdr->b_arc_access = 0;
3355                 if (hash_lock)
3356                         mutex_exit(hash_lock);
3357 
3358                 buf_discard_identity(hdr);
3359                 arc_buf_thaw(buf);
3360         }
3361         buf->b_efunc = NULL;
3362         buf->b_private = NULL;
3363 
3364         if (l2hdr) {
3365                 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3366                 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3367                 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3368                 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3369                 mutex_exit(&l2arc_buflist_mtx);
3370         }
3371 }
3372 
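/*
 * Returns whether the buffer has been released into the anonymous state
 * (i.e. arc_release() has been called and the data is still attached).
 */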
3373 int
3374 arc_released(arc_buf_t *buf)
3375 {
3376         int released;
3377 
3378         mutex_enter(&buf->b_evict_lock);
3379         released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3380         mutex_exit(&buf->b_evict_lock);
3381         return (released);
3382 }
3383 
3384 int
3385 arc_has_callback(arc_buf_t *buf)
3386 {
3387         int callback;
3388 
3389         mutex_enter(&buf->b_evict_lock);
3390         callback = (buf->b_efunc != NULL);
3391         mutex_exit(&buf->b_evict_lock);
3392         return (callback);
3393 }
3394 
3395 #ifdef ZFS_DEBUG
3396 int
3397 arc_referenced(arc_buf_t *buf)
3398 {
3399         int referenced;
3400 
3401         mutex_enter(&buf->b_evict_lock);
3402         referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3403         mutex_exit(&buf->b_evict_lock);
3404         return (referenced);
3405 }
3406 #endif
3407 
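/*
 * zio "ready" callback for ARC writes: invoke the caller's ready func,
 * discard any stale freeze checksum left over from a previous write
 * attempt, recompute the checksum, and mark the I/O as in progress.
 */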
3408 static void
3409 arc_write_ready(zio_t *zio)
3410 {
3411         arc_write_callback_t *callback = zio->io_private;
3412         arc_buf_t *buf = callback->awcb_buf;
3413         arc_buf_hdr_t *hdr = buf->b_hdr;
3414 
3415         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3416         callback->awcb_ready(zio, buf, callback->awcb_private);
3417 
3418         /*
3419          * If the IO is already in progress, then this is a re-write
3420          * attempt, so we need to thaw and re-compute the cksum.
3421          * It is the responsibility of the callback to handle the
3422          * accounting for any re-write attempt.
3423          */
3424         if (HDR_IO_IN_PROGRESS(hdr)) {
3425                 mutex_enter(&hdr->b_freeze_lock);
3426                 if (hdr->b_freeze_cksum != NULL) {
3427                         kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3428                         hdr->b_freeze_cksum = NULL;
3429                 }
3430                 mutex_exit(&hdr->b_freeze_lock);
3431         }
3432         arc_cksum_compute(buf, B_FALSE);
3433         hdr->b_flags |= ARC_IO_IN_PROGRESS;
3434 }
3435 
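/*
 * zio completion callback for ARC writes: record the block's identity
 * (DVA/birth) in the hdr, insert it into the hash table unless the write
 * was compressed away to nothing, and invoke the caller's done func.
 */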
3436 static void
3437 arc_write_done(zio_t *zio)
3438 {
3439         arc_write_callback_t *callback = zio->io_private;
3440         arc_buf_t *buf = callback->awcb_buf;
3441         arc_buf_hdr_t *hdr = buf->b_hdr;
3442 
3443         ASSERT(hdr->b_acb == NULL);
3444 
3445         if (zio->io_error == 0) {
3446                 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3447                 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3448                 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3449         } else {
3450                 ASSERT(BUF_EMPTY(hdr));
3451         }
3452 
3453         /*
3454          * If the block to be written was all-zero, we may have
3455          * compressed it away.  In this case no write was performed
3456          * so there will be no dva/birth/checksum.  The buffer must
3457          * therefore remain anonymous (and uncached).
3458          */
3459         if (!BUF_EMPTY(hdr)) {
3460                 arc_buf_hdr_t *exists;
3461                 kmutex_t *hash_lock;
3462 
3463                 ASSERT(zio->io_error == 0);
3464 
3465                 arc_cksum_verify(buf);
3466 
3467                 exists = buf_hash_insert(hdr, &hash_lock);
3468                 if (exists) {
3469                         /*
3470                          * This can only happen if we overwrite for
3471                          * sync-to-convergence, because we remove
3472                          * buffers from the hash table when we arc_free().
3473                          */
3474                         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3475                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3476                                         panic("bad overwrite, hdr=%p exists=%p",
3477                                             (void *)hdr, (void *)exists);
3478                                 ASSERT(refcount_is_zero(&exists->b_refcnt));
3479                                 arc_change_state(arc_anon, exists, hash_lock);
3480                                 mutex_exit(hash_lock);
3481                                 arc_hdr_destroy(exists);
3482                                 exists = buf_hash_insert(hdr, &hash_lock);
3483                                 ASSERT3P(exists, ==, NULL);
3484                         } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3485                                 /* nopwrite */
3486                                 ASSERT(zio->io_prop.zp_nopwrite);
3487                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3488                                         panic("bad nopwrite, hdr=%p exists=%p",
3489                                             (void *)hdr, (void *)exists);
3490                         } else {
3491                                 /* Dedup */
3492                                 ASSERT(hdr->b_datacnt == 1);
3493                                 ASSERT(hdr->b_state == arc_anon);
3494                                 ASSERT(BP_GET_DEDUP(zio->io_bp));
3495                                 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3496                         }
3497                 }
3498                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3499                 /* if it's not anon, we are doing a scrub */
3500                 if (!exists && hdr->b_state == arc_anon)
3501                         arc_access(hdr, hash_lock);
3502                 mutex_exit(hash_lock);
3503         } else {
3504                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3505         }
3506 
3507         ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3508         callback->awcb_done(zio, buf, callback->awcb_private);
3509 
3510         kmem_free(callback, sizeof (arc_write_callback_t));
3511 }
3512 
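/*
 * Create (but do not issue) a zio that will write the given ARC buffer.
 * The zio's ready/done callbacks wrap the caller's via arc_write_ready()
 * and arc_write_done().
 */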
3513 zio_t *
3514 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3515     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3516     const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *done,
3517     void *private, int priority, int zio_flags, const zbookmark_t *zb)
3518 {
3519         arc_buf_hdr_t *hdr = buf->b_hdr;
3520         arc_write_callback_t *callback;
3521         zio_t *zio;
3522 
3523         ASSERT(ready != NULL);
3524         ASSERT(done != NULL);
3525         ASSERT(!HDR_IO_ERROR(hdr));
3526         ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3527         ASSERT(hdr->b_acb == NULL);
3528         if (l2arc)
3529                 hdr->b_flags |= ARC_L2CACHE;
3530         if (l2arc_compress)
3531                 hdr->b_flags |= ARC_L2COMPRESS;
3532         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3533         callback->awcb_ready = ready;
3534         callback->awcb_done = done;
3535         callback->awcb_private = private;
3536         callback->awcb_buf = buf;
3537 
3538         zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3539             arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3540 
3541         return (zio);
3542 }
3543 
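/*
 * Decide whether a write of `reserve' bytes should be throttled because
 * system memory is tight.  Returns 0 to proceed, or ERESTART/EAGAIN to
 * make the caller back off and retry.
 */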
3544 static int
3545 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3546 {
3547 #ifdef _KERNEL
3548         uint64_t available_memory = ptob(freemem);
3549         static uint64_t page_load = 0;
3550         static uint64_t last_txg = 0;
3551 
3552 #if defined(__i386)
3553         available_memory =
3554             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3555 #endif
3556         if (available_memory >= zfs_write_limit_max)
3557                 return (0);
3558 
3559         if (txg > last_txg) {
3560                 last_txg = txg;
3561                 page_load = 0;
3562         }
3563         /*
3564          * If we are in pageout, we know that memory is already tight
3565          * and the arc is already going to be evicting, so we just want
3566          * to continue to let page writes occur as quickly as possible.
3567          */
3568         if (curproc == proc_pageout) {
3569                 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3570                         return (SET_ERROR(ERESTART));
3571                 /* Note: reserve is inflated, so we deflate */
3572                 page_load += reserve / 8;
3573                 return (0);
3574         } else if (page_load > 0 && arc_reclaim_needed()) {
3575                 /* memory is low, delay before restarting */
3576                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3577                 return (SET_ERROR(EAGAIN));
3578         }
3579         page_load = 0;
3580 
3581         if (arc_size > arc_c_min) {
3582                 uint64_t evictable_memory =
3583                     arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3584                     arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3585                     arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3586                     arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3587                 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3588         }
3589 
3590         if (inflight_data > available_memory / 4) {
3591                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3592                 return (SET_ERROR(ERESTART));
3593         }
3594 #endif
3595         return (0);
3596 }
3597 
3598 void
3599 arc_tempreserve_clear(uint64_t reserve)
3600 {
3601         atomic_add_64(&arc_tempreserve, -reserve);
3602         ASSERT((int64_t)arc_tempreserve >= 0);
3603 }
3604 
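/*
 * Reserve space for `reserve' bytes of anonymous (dirty) data for the
 * given txg, growing arc_c if necessary and throttling when memory is
 * low or too much dirty data is already in flight.
 */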
3605 int
3606 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3607 {
3608         int error;
3609         uint64_t anon_size;
3610 
3611 #ifdef ZFS_DEBUG
3612         /*
3613          * Once in a while, fail for no reason.  Everything should cope.
3614          */
3615         if (spa_get_random(10000) == 0) {
3616                 dprintf("forcing random failure\n");
3617                 return (SET_ERROR(ERESTART));
3618         }
3619 #endif
3620         if (reserve > arc_c/4 && !arc_no_grow)
3621                 arc_c = MIN(arc_c_max, reserve * 4);
3622         if (reserve > arc_c)
3623                 return (SET_ERROR(ENOMEM));
3624 
3625         /*
3626          * Don't count loaned bufs as in flight dirty data to prevent long
3627          * network delays from blocking transactions that are ready to be
3628          * assigned to a txg.
3629          */
3630         anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3631 
3632         /*
3633          * Writes will, almost always, require additional memory allocations
3634          * in order to compress/encrypt/etc the data.  We therefore need to
3635          * make sure that there is sufficient available memory for this.
3636          */
3637         if (error = arc_memory_throttle(reserve, anon_size, txg))
3638                 return (error);
3639 
3640         /*
3641          * Throttle writes when the amount of dirty data in the cache
3642          * gets too large.  We try to keep the cache less than half full
3643          * of dirty blocks so that our sync times don't grow too large.
3644          * Note: if two requests come in concurrently, we might let them
3645          * both succeed, when one of them should fail.  Not a huge deal.
3646          */
3647 
3648         if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3649             anon_size > arc_c / 4) {
3650                 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3651                     "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3652                     arc_tempreserve>>10,
3653                     arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3654                     arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3655                     reserve>>10, arc_c>>10);
3656                 return (SET_ERROR(ERESTART));
3657         }
3658         atomic_add_64(&arc_tempreserve, reserve);
3659         return (0);
3660 }
3661 
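/*
 * Set up the ARC: compute the initial/min/max cache sizes (honoring the
 * zfs_arc_* tunables), create the ARC states and their lists, register
 * the arcstats kstat, and start the reclaim thread.
 */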
3662 void
3663 arc_init(void)
3664 {
3665         mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3666         cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3667 
3668         /* Convert seconds to clock ticks */
3669         arc_min_prefetch_lifespan = 1 * hz;
3670 
3671         /* Start out with 1/8 of all memory */
3672         arc_c = physmem * PAGESIZE / 8;
3673 
3674 #ifdef _KERNEL
3675         /*
3676          * On architectures where the physical memory can be larger
3677          * than the addressable space (intel in 32-bit mode), we may
3678          * need to limit the cache to 1/8 of VM size.
3679          */
3680         arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3681 #endif
3682 
3683         /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3684         arc_c_min = MAX(arc_c / 4, 64<<20);
3685         /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3686         if (arc_c * 8 >= 1<<30)
3687                 arc_c_max = (arc_c * 8) - (1<<30);
3688         else
3689                 arc_c_max = arc_c_min;
3690         arc_c_max = MAX(arc_c * 6, arc_c_max);
3691 
3692         /*
3693          * Allow the tunables to override our calculations if they are
3694          * reasonable (i.e., over 64MB).
3695          */
3696         if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3697                 arc_c_max = zfs_arc_max;
3698         if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3699                 arc_c_min = zfs_arc_min;
3700 
3701         arc_c = arc_c_max;
3702         arc_p = (arc_c >> 1);
3703 
3704         /* limit meta-data to 1/4 of the arc capacity */
3705         arc_meta_limit = arc_c_max / 4;
3706 
3707         /* Allow the tunable to override if it is reasonable */
3708         if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3709                 arc_meta_limit = zfs_arc_meta_limit;
3710 
3711         if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3712                 arc_c_min = arc_meta_limit / 2;
3713 
3714         if (zfs_arc_grow_retry > 0)
3715                 arc_grow_retry = zfs_arc_grow_retry;
3716 
3717         if (zfs_arc_shrink_shift > 0)
3718                 arc_shrink_shift = zfs_arc_shrink_shift;
3719 
3720         if (zfs_arc_p_min_shift > 0)
3721                 arc_p_min_shift = zfs_arc_p_min_shift;
3722 
3723         /* if kmem_flags are set, let's try to use less memory */
3724         if (kmem_debugging())
3725                 arc_c = arc_c / 2;
3726         if (arc_c < arc_c_min)
3727                 arc_c = arc_c_min;
3728 
3729         arc_anon = &ARC_anon;
3730         arc_mru = &ARC_mru;
3731         arc_mru_ghost = &ARC_mru_ghost;
3732         arc_mfu = &ARC_mfu;
3733         arc_mfu_ghost = &ARC_mfu_ghost;
3734         arc_l2c_only = &ARC_l2c_only;
3735         arc_size = 0;
3736 
3737         mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3738         mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3739         mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3740         mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3741         mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3742         mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3743 
3744         list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3745             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3746         list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3747             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3748         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3749             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3750         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3751             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3752         list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3753             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3754         list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3755             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3756         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3757             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3758         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3759             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3760         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3761             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3762         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3763             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3764 
3765         buf_init();
3766 
3767         arc_thread_exit = 0;
3768         arc_eviction_list = NULL;
3769         mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3770         bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3771 
3772         arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3773             sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3774 
3775         if (arc_ksp != NULL) {
3776                 arc_ksp->ks_data = &arc_stats;
3777                 kstat_install(arc_ksp);
3778         }
3779 
3780         (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3781             TS_RUN, minclsyspri);
3782 
3783         arc_dead = FALSE;
3784         arc_warm = B_FALSE;
3785 
3786         if (zfs_write_limit_max == 0)
3787                 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3788         else
3789                 zfs_write_limit_shift = 0;
3790         mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3791 }
3792 
3793 void
3794 arc_fini(void)
3795 {
3796         mutex_enter(&arc_reclaim_thr_lock);
3797         arc_thread_exit = 1;
3798         while (arc_thread_exit != 0)
3799                 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3800         mutex_exit(&arc_reclaim_thr_lock);
3801 
3802         arc_flush(NULL);
3803 
3804         arc_dead = TRUE;
3805 
3806         if (arc_ksp != NULL) {
3807                 kstat_delete(arc_ksp);
3808                 arc_ksp = NULL;
3809         }
3810 
3811         mutex_destroy(&arc_eviction_mtx);
3812         mutex_destroy(&arc_reclaim_thr_lock);
3813         cv_destroy(&arc_reclaim_thr_cv);
3814 
3815         list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3816         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3817         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3818         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3819         list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3820         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3821         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3822         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3823 
3824         mutex_destroy(&arc_anon->arcs_mtx);
3825         mutex_destroy(&arc_mru->arcs_mtx);
3826         mutex_destroy(&arc_mru_ghost->arcs_mtx);
3827         mutex_destroy(&arc_mfu->arcs_mtx);
3828         mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3829         mutex_destroy(&arc_l2c_only->arcs_mtx);
3830 
3831         mutex_destroy(&zfs_write_limit_lock);
3832 
3833         buf_fini();
3834 
3835         ASSERT(arc_loaned_bytes == 0);
3836 }
3837 
3838 /*
3839  * Level 2 ARC
3840  *
3841  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3842  * It uses dedicated storage devices to hold cached data, which are populated
3843  * using large infrequent writes.  The main role of this cache is to boost
3844  * the performance of random read workloads.  The intended L2ARC devices
3845  * include short-stroked disks, solid state disks, and other media with
3846  * substantially faster read latency than disk.
3847  *
3848  *                 +-----------------------+
3849  *                 |         ARC           |
3850  *                 +-----------------------+
3851  *                    |         ^     ^
3852  *                    |         |     |
3853  *      l2arc_feed_thread()    arc_read()
3854  *                    |         |     |
3855  *                    |  l2arc read   |
3856  *                    V         |     |
3857  *               +---------------+    |
3858  *               |     L2ARC     |    |
3859  *               +---------------+    |
3860  *                   |    ^           |
3861  *          l2arc_write() |           |
3862  *                   |    |           |
3863  *                   V    |           |
3864  *                 +-------+      +-------+
3865  *                 | vdev  |      | vdev  |
3866  *                 | cache |      | cache |
3867  *                 +-------+      +-------+
3868  *                 +=========+     .-----.
3869  *                 :  L2ARC  :    |-_____-|
3870  *                 : devices :    | Disks |
3871  *                 +=========+    `-_____-'
3872  *
3873  * Read requests are satisfied from the following sources, in order:
3874  *
3875  *      1) ARC
3876  *      2) vdev cache of L2ARC devices
3877  *      3) L2ARC devices
3878  *      4) vdev cache of disks
3879  *      5) disks
3880  *
3881  * Some L2ARC device types exhibit extremely slow write performance.
3882  * To accommodate this, there are some significant differences between
3883  * the L2ARC and traditional cache design:
3884  *
3885  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
3886  * the ARC behave as usual, freeing buffers and placing headers on ghost
3887  * lists.  The ARC does not send buffers to the L2ARC during eviction as
3888  * this would add inflated write latencies for all ARC memory pressure.
3889  *
3890  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3891  * It does this by periodically scanning buffers from the eviction-end of
3892  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3893  * not already there. It scans until a headroom of buffers is satisfied,
3894  * which acts as a cushion against ARC eviction. If a compressible buffer is
3895  * found during scanning and selected for writing to an L2ARC device, we
3896  * temporarily boost scanning headroom during the next scan cycle to make
3897  * sure we adapt to compression effects (which might significantly reduce
3898  * the data volume we write to L2ARC). The thread that does this is
3899  * l2arc_feed_thread(), illustrated below; example sizes are included to
3900  * provide a better sense of ratio than this diagram:
3901  *
3902  *             head -->                        tail
3903  *              +---------------------+----------+
3904  *      ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
3905  *              +---------------------+----------+   |   o L2ARC eligible
3906  *      ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
3907  *              +---------------------+----------+   |
3908  *                   15.9 Gbytes      ^ 32 Mbytes    |
3909  *                                 headroom          |
3910  *                                            l2arc_feed_thread()
3911  *                                                   |
3912  *                       l2arc write hand <--[oooo]--'
3913  *                               |           8 Mbyte
3914  *                               |          write max
3915  *                               V
3916  *                +==============================+
3917  *      L2ARC dev |####|#|###|###|    |####| ... |
3918  *                +==============================+
3919  *                           32 Gbytes
3920  *
3921  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3922  * evicted, then the L2ARC has cached a buffer much sooner than it probably
3923  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
3924  * safe to say that this is an uncommon case, since buffers at the end of
3925  * the ARC lists have moved there due to inactivity.
3926  *
3927  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3928  * then the L2ARC simply misses copying some buffers.  This serves as a
3929  * pressure valve to prevent heavy read workloads from both stalling the ARC
3930  * with waits and clogging the L2ARC with writes.  This also helps prevent
3931  * the potential for the L2ARC to churn if it attempts to cache content too
3932  * quickly, such as during backups of the entire pool.
3933  *
3934  * 5. After system boot and before the ARC has filled main memory, there are
3935  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3936  * lists can remain mostly static.  Instead of searching from the tail of these
3937  * lists as pictured, the l2arc_feed_thread() will search from the list heads
3938  * for eligible buffers, greatly increasing its chance of finding them.
3939  *
3940  * The L2ARC device write speed is also boosted during this time so that
3941  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
3942  * there are no L2ARC reads, and no fear of degrading read performance
3943  * through increased writes.
3944  *
3945  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3946  * the vdev queue can aggregate them into larger and fewer writes.  Each
3947  * device is written to in a rotor fashion, sweeping writes through
3948  * available space then repeating.
3949  *
3950  * 7. The L2ARC does not store dirty content.  It never needs to flush
3951  * write buffers back to disk-based storage.
3952  *
3953  * 8. If an ARC buffer is written (and dirtied) which also exists in the
3954  * L2ARC, the now stale L2ARC buffer is immediately dropped.
3955  *
3956  * The performance of the L2ARC can be tweaked by a number of tunables, which
3957  * may be necessary for different workloads:
3958  *
3959  *      l2arc_write_max         max write bytes per interval
3960  *      l2arc_write_boost       extra write bytes during device warmup
3961  *      l2arc_noprefetch        skip caching prefetched buffers
3962  *      l2arc_headroom          number of max device writes to precache
3963  *      l2arc_headroom_boost    when we find compressed buffers during ARC
3964  *                              scanning, we multiply headroom by this
3965  *                              percentage factor for the next scan cycle,
3966  *                              since more compressed buffers are likely to
3967  *                              be present
3968  *      l2arc_feed_secs         seconds between L2ARC writing
3969  *
3970  * Tunables may be removed or added as future performance improvements are
3971  * integrated, and also may become zpool properties.
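      *
      * For example (values purely illustrative), such globals can typically
      * be adjusted at boot time via /etc/system on illumos:
      *
      *      set zfs:l2arc_write_max = 0x2000000
      *      set zfs:l2arc_feed_secs = 2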
3972  *
3973  * There are three key functions that control how the L2ARC warms up:
3974  *
3975  *      l2arc_write_eligible()  check if a buffer is eligible to cache
3976  *      l2arc_write_size()      calculate how much to write
3977  *      l2arc_write_interval()  calculate sleep delay between writes
3978  *
3979  * These three functions determine what to write, how much, and how quickly
3980  * to send writes.
3981  */
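
     /*
      * For orientation, one feed cycle in l2arc_feed_thread() below reduces,
      * with locking and error handling omitted, to roughly:
      *
      *      dev = l2arc_dev_get_next();          pick the next cache device
      *      size = l2arc_write_size();           how much to write
      *      l2arc_evict(dev, size, B_FALSE);     clear space ahead of the hand
      *      wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
      *      next = l2arc_write_interval(begin, size, wrote);
      */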
3982 
3983 static boolean_t
3984 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3985 {
3986         /*
3987          * A buffer is *not* eligible for the L2ARC if it:
3988          * 1. belongs to a different spa.
3989          * 2. is already cached on the L2ARC.
3990          * 3. has an I/O in progress (it may be an incomplete read).
3991          * 4. is flagged not eligible (zfs property).
3992          */
3993         if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
3994             HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3995                 return (B_FALSE);
3996 
3997         return (B_TRUE);
3998 }
3999 
4000 static uint64_t
4001 l2arc_write_size(void)
4002 {
4003         uint64_t size;
4004 
4005         /*
4006          * Make sure our globals have meaningful values in case the user
4007          * altered them.
4008          */
4009         size = l2arc_write_max;
4010         if (size == 0) {
4011                 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4012                     "be greater than zero, resetting it to the default (%d)",
4013                     L2ARC_WRITE_SIZE);
4014                 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4015         }
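
             /*
              * For example, with the 8 Mbyte default pictured in the design
              * diagram above and an equal l2arc_write_boost (illustrative
              * values), a cold cache is fed up to 16 Mbytes per interval
              * until the ARC warms up and starts evicting.
              */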
4016 
4017         if (arc_warm == B_FALSE)
4018                 size += l2arc_write_boost;
4019 
4020         return (size);
4021 
4022 }
4023 
4024 static clock_t
4025 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4026 {
4027         clock_t interval, next, now;
4028 
4029         /*
4030          * If the ARC lists are busy, increase our write rate; if the
4031          * lists are stale, idle back.  This is achieved by checking
4032          * how much we previously wrote - if it was more than half of
4033          * what we wanted, schedule the next write much sooner.
4034          */
4035         if (l2arc_feed_again && wrote > (wanted / 2))
4036                 interval = (hz * l2arc_feed_min_ms) / 1000;
4037         else
4038                 interval = hz * l2arc_feed_secs;
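
             /*
              * For example (illustrative values), with hz = 100,
              * l2arc_feed_min_ms = 200 and l2arc_feed_secs = 1, a pass that
              * wrote more than half of what was wanted (and l2arc_feed_again
              * set) schedules the next write ~20 ticks (200ms) out instead
              * of ~100 ticks (1s).
              */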
4039 
4040         now = ddi_get_lbolt();
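             /*
              * That is, aim for 'interval' ticks after the previous pass
              * began (began + interval), but never schedule the next write
              * in the past.
              */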
4041         next = MAX(now, MIN(now + interval, began + interval));
4042 
4043         return (next);
4044 }
4045 
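     /*
      * Shift header accounting between the ARC and L2ARC kstats: HDR_SIZE
      * moves from arcstat_hdr_size to arcstat_l2_hdr_size (and back again
      * in l2arc_hdr_stat_remove() below), plus the size of the L2-only
      * header.
      */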
4046 static void
4047 l2arc_hdr_stat_add(void)
4048 {
4049         ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4050         ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4051 }
4052 
4053 static void
4054 l2arc_hdr_stat_remove(void)
4055 {
4056         ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4057         ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4058 }
4059 
4060 /*
4061  * Cycle through L2ARC devices.  This is how L2ARC load balances.
4062  * If a device is returned, the spa config lock is also held on return.
4063  */
4064 static l2arc_dev_t *
4065 l2arc_dev_get_next(void)
4066 {
4067         l2arc_dev_t *first, *next = NULL;
4068 
4069         /*
4070          * Lock out the removal of spas (spa_namespace_lock), then removal
4071          * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
4072          * both locks will be dropped and a spa config lock held instead.
4073          */
4074         mutex_enter(&spa_namespace_lock);
4075         mutex_enter(&l2arc_dev_mtx);
4076 
4077         /* if there are no vdevs, there is nothing to do */
4078         if (l2arc_ndev == 0)
4079                 goto out;
4080 
4081         first = NULL;
4082         next = l2arc_dev_last;
4083         do {
4084                 /* loop around the list looking for a non-faulted vdev */
4085                 if (next == NULL) {
4086                         next = list_head(l2arc_dev_list);
4087                 } else {
4088                         next = list_next(l2arc_dev_list, next);
4089                         if (next == NULL)
4090                                 next = list_head(l2arc_dev_list);
4091                 }
4092 
4093                 /* if we have come back to the start, bail out */
4094                 if (first == NULL)
4095                         first = next;
4096                 else if (next == first)
4097                         break;
4098 
4099         } while (vdev_is_dead(next->l2ad_vdev));
4100 
4101         /* if we were unable to find any usable vdevs, return NULL */
4102         if (vdev_is_dead(next->l2ad_vdev))
4103                 next = NULL;
4104 
4105         l2arc_dev_last = next;
4106 
4107 out:
4108         mutex_exit(&l2arc_dev_mtx);
4109 
4110         /*
4111          * Grab the config lock to prevent the 'next' device from being
4112          * removed while we are writing to it.
4113          */
4114         if (next != NULL)
4115                 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4116         mutex_exit(&spa_namespace_lock);
4117 
4118         return (next);
4119 }
4120 
4121 /*
4122  * Free buffers that were tagged for destruction.
4123  */
4124 static void
4125 l2arc_do_free_on_write(void)
4126 {
4127         list_t *buflist;
4128         l2arc_data_free_t *df, *df_prev;
4129 
4130         mutex_enter(&l2arc_free_on_write_mtx);
4131         buflist = l2arc_free_on_write;
4132 
4133         for (df = list_tail(buflist); df; df = df_prev) {
4134                 df_prev = list_prev(buflist, df);
4135                 ASSERT(df->l2df_data != NULL);
4136                 ASSERT(df->l2df_func != NULL);
4137                 df->l2df_func(df->l2df_data, df->l2df_size);
4138                 list_remove(buflist, df);
4139                 kmem_free(df, sizeof (l2arc_data_free_t));
4140         }
4141 
4142         mutex_exit(&l2arc_free_on_write_mtx);
4143 }
4144 
4145 /*
4146  * A write to a cache device has completed.  Update all headers to allow
4147  * reads from these buffers to begin.
4148  */
4149 static void
4150 l2arc_write_done(zio_t *zio)
4151 {
4152         l2arc_write_callback_t *cb;
4153         l2arc_dev_t *dev;
4154         list_t *buflist;
4155         arc_buf_hdr_t *head, *ab;
4156 
4157         struct defer_done_entry {
4158                 arc_buf_hdr_t *dde_buf;
4159                 list_node_t dde_node;
4160         } *dde, *dde_next;
4161         list_t defer_done_list;
4162 
4163         cb = zio->io_private;
4164         ASSERT(cb != NULL);
4165         dev = cb->l2wcb_dev;
4166         ASSERT(dev != NULL);
4167         head = cb->l2wcb_head;
4168         ASSERT(head != NULL);
4169         buflist = dev->l2ad_buflist;
4170         ASSERT(buflist != NULL);
4171         DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4172             l2arc_write_callback_t *, cb);
4173 
4174         if (zio->io_error != 0)
4175                 ARCSTAT_BUMP(arcstat_l2_writes_error);
4176 
4177         mutex_enter(&l2arc_buflist_mtx);
4178 
4179         /*
4180          * All writes completed, or an error was hit.
4181          */
4182         list_create(&defer_done_list, sizeof (*dde),
4183             offsetof(struct defer_done_entry, dde_node));
4184         for (ab = list_prev(buflist, head); ab; ab = list_prev(buflist, ab)) {
4185                 /*
4186                  * Can't pause here to grab hash_lock while also holding
4187                  * l2arc_buflist_mtx, so place the buffers on a temporary
4188                  * thread-local list for later processing.
4189                  */
4190                 dde = kmem_alloc(sizeof (*dde), KM_SLEEP);
4191                 dde->dde_buf = ab;
4192                 list_insert_tail(&defer_done_list, dde);
4193         }
4194 
4195         atomic_inc_64(&l2arc_writes_done);
4196         list_remove(buflist, head);
4197         kmem_cache_free(hdr_cache, head);
4198         mutex_exit(&l2arc_buflist_mtx);
4199 
4200         /*
4201          * Now process the buffers. We're not holding l2arc_buflist_mtx
4202          * anymore, so we can do a regular mutex_enter on the hash_lock.
4203          */
4204         for (dde = list_head(&defer_done_list); dde != NULL; dde = dde_next) {
4205                 kmutex_t *hash_lock;
4206 
4207                 dde_next = list_next(&defer_done_list, dde);
4208                 ab = dde->dde_buf;
4209                 hash_lock = HDR_LOCK(ab);
4210 
4211                 mutex_enter(hash_lock);
4212 
4213                 if (zio->io_error != 0) {
4214                         /*
4215                          * Error - drop L2ARC entry.
4216                          */
4217                         l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
4218                         mutex_enter(&l2arc_buflist_mtx);
4219                         list_remove(buflist, ab);
4220                         mutex_exit(&l2arc_buflist_mtx);
4221                         ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
4222                         ab->b_l2hdr = NULL;
4223                         kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
4224                         ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4225                 }
4226 
4227                 /*
4228                  * Allow ARC to begin reads to this L2ARC entry.
4229                  */
4230                 ab->b_flags &= ~ARC_L2_WRITING;
4231 
4232                 mutex_exit(hash_lock);
4233 
4234                 list_remove(&defer_done_list, dde);
4235         }
4236         list_destroy(&defer_done_list);
4237 
4238         l2arc_do_free_on_write();
4239 
4240         kmem_free(cb, sizeof (l2arc_write_callback_t));
4241 }
4242 
4243 /*
4244  * A read from a cache device has completed.  Validate buffer contents before
4245  * handing over to the regular ARC routines.
4246  */
4247 static void
4248 l2arc_read_done(zio_t *zio)
4249 {
4250         l2arc_read_callback_t *cb;
4251         arc_buf_hdr_t *hdr;
4252         arc_buf_t *buf;
4253         kmutex_t *hash_lock;
4254         int equal;
4255 
4256         ASSERT(zio->io_vd != NULL);
4257         ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4258 
4259         spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4260 
4261         cb = zio->io_private;
4262         ASSERT(cb != NULL);
4263         buf = cb->l2rcb_buf;
4264         ASSERT(buf != NULL);
4265 
4266         hash_lock = HDR_LOCK(buf->b_hdr);
4267         mutex_enter(hash_lock);
4268         hdr = buf->b_hdr;
4269         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4270 
4271         /*
4272          * If the buffer was compressed, decompress it first.
4273          */
4274         if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4275                 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4276         ASSERT(zio->io_data != NULL);
4277 
4278         /*
4279          * Check this survived the L2ARC journey.
4280          */
4281         equal = arc_cksum_equal(buf);
4282         if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4283                 mutex_exit(hash_lock);
4284                 zio->io_private = buf;
4285                 zio->io_bp_copy = cb->l2rcb_bp;   /* XXX fix in L2ARC 2.0 */
4286                 zio->io_bp = &zio->io_bp_copy;        /* XXX fix in L2ARC 2.0 */
4287                 arc_read_done(zio);
4288         } else {
4289                 mutex_exit(hash_lock);
4290                 /*
4291                  * Buffer didn't survive caching.  Increment stats and
4292                  * reissue to the original storage device.
4293                  */
4294                 if (zio->io_error != 0) {
4295                         ARCSTAT_BUMP(arcstat_l2_io_error);
4296                 } else {
4297                         zio->io_error = SET_ERROR(EIO);
4298                 }
4299                 if (!equal)
4300                         ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4301 
4302                 /*
4303                  * If there's no waiter, issue an async i/o to the primary
4304                  * storage now.  If there *is* a waiter, the caller must
4305                  * issue the i/o in a context where it's OK to block.
4306                  */
4307                 if (zio->io_waiter == NULL) {
4308                         zio_t *pio = zio_unique_parent(zio);
4309 
4310                         ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4311 
4312                         zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4313                             buf->b_data, zio->io_size, arc_read_done, buf,
4314                             zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4315                 }
4316         }
4317 
4318         kmem_free(cb, sizeof (l2arc_read_callback_t));
4319 }
4320 
4321 /*
4322  * This is the list priority from which the L2ARC will search for pages to
4323  * cache.  This is used within loops (0..3) to cycle through lists in the
4324  * desired order.  This order can have a significant effect on cache
4325  * performance.
4326  *
4327  * Currently the metadata lists are hit first, MFU then MRU, followed by
4328  * the data lists.  This function returns a locked list, and also returns
4329  * the lock pointer.
4330  */
4331 static list_t *
4332 l2arc_list_locked(int list_num, kmutex_t **lock)
4333 {
4334         list_t *list = NULL;
4335 
4336         ASSERT(list_num >= 0 && list_num <= 3);
4337 
4338         switch (list_num) {
4339         case 0:
4340                 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4341                 *lock = &arc_mfu->arcs_mtx;
4342                 break;
4343         case 1:
4344                 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4345                 *lock = &arc_mru->arcs_mtx;
4346                 break;
4347         case 2:
4348                 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4349                 *lock = &arc_mfu->arcs_mtx;
4350                 break;
4351         case 3:
4352                 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4353                 *lock = &arc_mru->arcs_mtx;
4354                 break;
4355         }
4356 
4357         ASSERT(!(MUTEX_HELD(*lock)));
4358         mutex_enter(*lock);
4359         return (list);
4360 }
4361 
4362 /*
4363  * Evict buffers from the device write hand to the distance specified in
4364  * bytes.  This distance may span populated buffers, or it may span nothing.
4365  * This clears a region on the L2ARC device, making it ready for writing.
4366  * If the 'all' boolean is set, every buffer is evicted.
4367  */
4368 static void
4369 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4370 {
4371         list_t *buflist;
4372         l2arc_buf_hdr_t *l2hdr;
4373         arc_buf_hdr_t *ab, *ab_prev;
4374         kmutex_t *hash_lock;
4375         uint64_t taddr;
4376 
4377         buflist = dev->l2ad_buflist;
4378 
4379         if (buflist == NULL)
4380                 return;
4381 
4382         if (!all && dev->l2ad_first) {
4383                 /*
4384                  * This is the first sweep through the device.  There is
4385                  * nothing to evict.
4386                  */
4387                 return;
4388         }
4389 
4390         if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4391                 /*
4392                  * When nearing the end of the device, evict to the end
4393                  * before the device write hand jumps to the start.
4394                  */
4395                 taddr = dev->l2ad_end;
4396         } else {
4397                 taddr = dev->l2ad_hand + distance;
4398         }
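             /*
              * For example, with the 8 Mbyte write size pictured in the
              * design diagram above, this evicts the region
              * [l2ad_hand, l2ad_hand + 8M), or everything up to l2ad_end
              * once the hand is within 16M of the end of the device.
              */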
4399         DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4400             uint64_t, taddr, boolean_t, all);
4401 
4402 top:
4403         mutex_enter(&l2arc_buflist_mtx);
4404         for (ab = list_tail(buflist); ab; ab = ab_prev) {
4405                 ab_prev = list_prev(buflist, ab);
4406 
4407                 hash_lock = HDR_LOCK(ab);
4408                 if (!mutex_tryenter(hash_lock)) {
4409                         /*
4410                          * Missed the hash lock.  Retry.
4411                          */
4412                         ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4413                         mutex_exit(&l2arc_buflist_mtx);
4414                         mutex_enter(hash_lock);
4415                         mutex_exit(hash_lock);
4416                         goto top;
4417                 }
4418 
4419                 if (HDR_L2_WRITE_HEAD(ab)) {
4420                         /*
4421                          * We hit a write head node.  Leave it for
4422                          * l2arc_write_done().
4423                          */
4424                         list_remove(buflist, ab);
4425                         mutex_exit(hash_lock);
4426                         continue;
4427                 }
4428 
4429                 if (!all && ab->b_l2hdr != NULL &&
4430                     (ab->b_l2hdr->b_daddr > taddr ||
4431                     ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4432                         /*
4433                          * We've evicted to the target address,
4434                          * or the end of the device.
4435                          */
4436                         mutex_exit(hash_lock);
4437                         break;
4438                 }
4439 
4440                 if (HDR_FREE_IN_PROGRESS(ab)) {
4441                         /*
4442                          * Already on the path to destruction.
4443                          */
4444                         mutex_exit(hash_lock);
4445                         continue;
4446                 }
4447 
4448                 if (ab->b_state == arc_l2c_only) {
4449                         ASSERT(!HDR_L2_READING(ab));
4450                         /*
4451                          * This doesn't exist in the ARC.  Destroy.
4452                          * arc_hdr_destroy() will call list_remove()
4453                          * and decrement arcstat_l2_size.
4454                          */
4455                         arc_change_state(arc_anon, ab, hash_lock);
4456                         arc_hdr_destroy(ab);
4457                 } else {
4458                         /*
4459                          * Invalidate issued or about to be issued
4460                          * reads, since we may be about to write
4461                          * over this location.
4462                          */
4463                         if (HDR_L2_READING(ab)) {
4464                                 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4465                                 ab->b_flags |= ARC_L2_EVICTED;
4466                         }
4467 
4468                         /*
4469                          * Tell ARC this no longer exists in L2ARC.
4470                          */
4471                         if (ab->b_l2hdr != NULL) {
4472                                 l2hdr = ab->b_l2hdr;
4473                                 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
4474                                 ab->b_l2hdr = NULL;
4475                                 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
4476                                 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4477                         }
4478                         list_remove(buflist, ab);
4479 
4480                         /*
4481                          * This may have been leftover after a
4482                          * failed write.
4483                          */
4484                         ab->b_flags &= ~ARC_L2_WRITING;
4485                 }
4486                 mutex_exit(hash_lock);
4487         }
4488         mutex_exit(&l2arc_buflist_mtx);
4489 
4490         vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4491         dev->l2ad_evict = taddr;
4492 }
4493 
4494 /*
4495  * Find and write ARC buffers to the L2ARC device.
4496  *
4497  * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4498  * for reading until they have completed writing.
4499  * The headroom_boost is an in-out parameter used to maintain headroom boost
4500  * state between calls to this function.
4501  *
4502  * Returns the number of bytes actually written (which may be smaller than
4503  * the delta by which the device hand has changed due to alignment).
4504  */
4505 static uint64_t
4506 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4507     boolean_t *headroom_boost)
4508 {
4509         arc_buf_hdr_t *ab, *ab_prev, *head;
4510         list_t *list;
4511         uint64_t write_asize, write_psize, write_sz, headroom,
4512             buf_compress_minsz;
4513         void *buf_data;
4514         kmutex_t *list_lock;
4515         boolean_t full;
4516         l2arc_write_callback_t *cb;
4517         zio_t *pio, *wzio;
4518         uint64_t guid = spa_load_guid(spa);
4519         const boolean_t do_headroom_boost = *headroom_boost;
4520         struct defer_write_entry {
4521                 arc_buf_hdr_t *dwe_buf;
4522                 void *dwe_orig_data;
4523                 uint64_t dwe_orig_size;
4524                 list_node_t *dwe_node;
4525         } *dwe, *dwe_next;
4526         list_t defer_write_list;
4527 
4528         ASSERT(dev->l2ad_vdev != NULL);
4529 
4530         /* Lower the flag now; we might want to raise it again later. */
4531         *headroom_boost = B_FALSE;
4532 
4533         pio = NULL;
4534         write_sz = write_asize = write_psize = 0;
4535         full = B_FALSE;
4536         head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4537         head->b_flags |= ARC_L2_WRITE_HEAD;
4538 
4539         /*
4540          * We will want to try to compress buffers that are at least 2x the
4541          * device sector size.
4542          */
4543         buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
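             /* e.g. 1024 bytes on a 512-byte-sector (ashift = 9) device */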
4544 
4545         /*
4546          * Copy buffers for L2ARC writing.
4547          */
4548         list_create(&defer_write_list, sizeof (*dwe),
4549             offsetof(struct defer_write_entry, dwe_node));
4550         mutex_enter(&l2arc_buflist_mtx);
4551         for (int try = 0; try <= 3; try++) {
4552                 uint64_t passed_sz = 0;
4553 
4554                 list = l2arc_list_locked(try, &list_lock);
4555 
4556                 /*
4557                  * L2ARC fast warmup.
4558                  *
4559                  * Until the ARC is warm and starts to evict, read from the
4560                  * head of the ARC lists rather than the tail.
4561                  */
4562                 if (arc_warm == B_FALSE)
4563                         ab = list_head(list);
4564                 else
4565                         ab = list_tail(list);
4566 
4567                 headroom = target_sz * l2arc_headroom;
4568                 if (do_headroom_boost)
4569                         headroom = (headroom * l2arc_headroom_boost) / 100;
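
                     /*
                      * For example (illustrative values), an 8 Mbyte target
                      * with l2arc_headroom = 2 scans up to 16 Mbytes from
                      * each list; if the previous pass found compressed
                      * buffers, l2arc_headroom_boost = 200 doubles that to
                      * 32 Mbytes.
                      */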
4570 
4571                 for (; ab; ab = ab_prev) {
4572                         l2arc_buf_hdr_t *l2hdr;
4573                         kmutex_t *hash_lock;
4574 
4575                         if (arc_warm == B_FALSE)
4576                                 ab_prev = list_next(list, ab);
4577                         else
4578                                 ab_prev = list_prev(list, ab);
4579 
4580                         hash_lock = HDR_LOCK(ab);
4581                         if (!mutex_tryenter(hash_lock)) {
4582                                 /*
4583                                  * Skip this buffer rather than waiting.
4584                                  */
4585                                 continue;
4586                         }
4587 
4588                         passed_sz += ab->b_size;
4589                         if (passed_sz > headroom) {
4590                                 /*
4591                                  * Searched too far.
4592                                  */
4593                                 mutex_exit(hash_lock);
4594                                 break;
4595                         }
4596 
4597                         if (!l2arc_write_eligible(guid, ab)) {
4598                                 mutex_exit(hash_lock);
4599                                 continue;
4600                         }
4601 
4602                         if ((write_sz + ab->b_size) > target_sz) {
4603                                 full = B_TRUE;
4604                                 mutex_exit(hash_lock);
4605                                 break;
4606                         }
4607 
4608                         if (pio == NULL) {
4609                                 /*
4610                                  * Insert a dummy header on the buflist so
4611                                  * l2arc_write_done() can find where the
4612                                  * write buffers begin without searching.
4613                                  */
4614                                 list_insert_head(dev->l2ad_buflist, head);
4615 
4616                                 cb = kmem_alloc(
4617                                     sizeof (l2arc_write_callback_t), KM_SLEEP);
4618                                 cb->l2wcb_dev = dev;
4619                                 cb->l2wcb_head = head;
4620                                 pio = zio_root(spa, l2arc_write_done, cb,
4621                                     ZIO_FLAG_CANFAIL);
4622                         }
4623 
4624                         /*
4625                          * Create and add a new L2ARC header.
4626                          */
4627                         l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4628                         l2hdr->b_dev = dev;
4629                         ab->b_flags |= ARC_L2_WRITING;
4630                         l2hdr->b_compress = ZIO_COMPRESS_OFF;
4631                         l2hdr->b_asize = ab->b_size;
4632 
4633                         /*
4634                          * Temporarily stash the buffer in defer_write_entries.
4635                          * The subsequent write step will pick it up from
4636                          * there.  This is because we can't access ab->b_buf
4637                          * without holding the hash_lock, which here is only
4638                          * taken while the ARC list locks are held to walk
4639                          * the lists, and we want to avoid holding any of
4640                          * these locks during compression/writing.
4641                          */
4642                         dwe = kmem_alloc(sizeof (*dwe), KM_SLEEP);
4643                         dwe->dwe_buf = ab;
4644                         dwe->dwe_orig_data = ab->b_buf->b_data;
4645                         dwe->dwe_orig_size = ab->b_size;
4646 
4647                         ab->b_l2hdr = l2hdr;
4648 
4649                         list_insert_head(dev->l2ad_buflist, ab);
4650                         list_insert_tail(&defer_write_list, dwe);
4651 
4652                         /*
4653                          * Compute and store the buffer cksum before
4654                          * writing.  On debug the cksum is verified first.
4655                          */
4656                         arc_cksum_verify(ab->b_buf);
4657                         arc_cksum_compute(ab->b_buf, B_TRUE);
4658 
4659                         mutex_exit(hash_lock);
4660 
4661                         write_sz += dwe->dwe_orig_size;
4662                 }
4663 
4664                 mutex_exit(list_lock);
4665 
4666                 if (full == B_TRUE)
4667                         break;
4668         }
4669 
4670         /* No buffers selected for writing? */
4671         if (pio == NULL) {
4672                 ASSERT0(write_sz);
4673                 mutex_exit(&l2arc_buflist_mtx);
4674                 kmem_cache_free(hdr_cache, head);
4675                 list_destroy(&defer_write_list);
4676                 return (0);
4677         }
4678 
4679         mutex_exit(&l2arc_buflist_mtx);
4680 
4681         /*
4682          * Now start writing the buffers.  We start at the write head
4683          * and work backwards, retracing the course of the buffer selector
4684          * loop above.
4685          */
4686         for (dwe = list_head(&defer_write_list); dwe != NULL; dwe = dwe_next) {
4687                 l2arc_buf_hdr_t *l2hdr;
4688                 uint64_t buf_sz;
4689 
4690                 dwe_next = list_next(&defer_write_list, dwe);
4691                 ab = dwe->dwe_buf;
4692 
4693                 /*
4694                  * Accessing ab->b_l2hdr without locking is safe here because
4695                  * we're holding the l2arc_buflist_mtx and no other thread will
4696                  * ever directly modify the L2 fields. In particular ab->b_buf
4697                  * may be invalid by now due to ARC eviction.
4698                  */
4699                 l2hdr = ab->b_l2hdr;
4700                 l2hdr->b_daddr = dev->l2ad_hand;
4701 
4702                 if ((ab->b_flags & ARC_L2COMPRESS) &&
4703                     l2hdr->b_asize >= buf_compress_minsz &&
4704                     l2arc_compress_buf(dwe->dwe_orig_data, dwe->dwe_orig_size,
4705                     &buf_data, &buf_sz, &l2hdr->b_compress)) {
4706                         /*
4707                          * If compression succeeded, enable headroom
4708                          * boost on the next scan cycle.
4709                          */
4710                         *headroom_boost = B_TRUE;
4711                         l2hdr->b_asize = buf_sz;
4712                 } else {
4713                         buf_data = dwe->dwe_orig_data;
4714                         buf_sz = dwe->dwe_orig_size;
4715                         l2hdr->b_asize = dwe->dwe_orig_size;
4716                 }
4717 
4718                 /* Compression may have squashed the buffer to zero length. */
4719                 if (buf_sz != 0) {
4720                         uint64_t buf_p_sz;
4721 
4722                         wzio = zio_write_phys(pio, dev->l2ad_vdev,
4723                             dev->l2ad_hand, l2hdr->b_asize, buf_data,
4724                             ZIO_CHECKSUM_OFF, NULL, NULL,
4725                             ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL,
4726                             B_FALSE);
4727 
4728                         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4729                             zio_t *, wzio);
4730                         (void) zio_nowait(wzio);
4731 
4732                         write_asize += l2hdr->b_asize;
4733                         /*
4734                          * Keep the clock hand suitably device-aligned.
4735                          */
4736                         buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4737                         write_psize += buf_p_sz;
4738                         dev->l2ad_hand += buf_p_sz;
4739                 }
4740 
4741                 list_remove(&defer_write_list, dwe);
4742                 kmem_free(dwe, sizeof (*dwe));
4743         }
4744 
4745         list_destroy(&defer_write_list);
4746 
4747         ASSERT3U(write_asize, <=, target_sz);
4748         ARCSTAT_BUMP(arcstat_l2_writes_sent);
4749         ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
4750         ARCSTAT_INCR(arcstat_l2_size, write_sz);
4751         ARCSTAT_INCR(arcstat_l2_asize, write_asize);
4752         vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
4753 
4754         /*
4755          * Bump device hand to the device start if it is approaching the end.
4756          * l2arc_evict() will already have evicted ahead for this case.
4757          */
4758         if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4759                 vdev_space_update(dev->l2ad_vdev,
4760                     dev->l2ad_end - dev->l2ad_hand, 0, 0);
4761                 dev->l2ad_hand = dev->l2ad_start;
4762                 dev->l2ad_evict = dev->l2ad_start;
4763                 dev->l2ad_first = B_FALSE;
4764         }
4765 
4766         dev->l2ad_writing = B_TRUE;
4767         (void) zio_wait(pio);
4768         dev->l2ad_writing = B_FALSE;
4769 
4770         return (write_asize);
4771 }
4772 
4773 /*
4774  * Compresses an L2ARC buffer.
4775  * The data to be compressed is in in_data and its size in in_sz. This routine
4776  * tries to compress the data and depending on the compression result there
4777  * are three possible outcomes:
4778  * *) The buffer was incompressible. The function returns with B_FALSE and
4779  *    does nothing else.
4780  * *) The buffer was all-zeros, so there is no need to write it to an L2
4781  *    device. To indicate this situation, the *out_data is set to NULL,
4782  *    *out_sz is set to zero, *compress is set to ZIO_COMPRESS_EMPTY and
4783  *    the function returns B_TRUE.
4784  * *) Compression succeeded and *out_data was set to point to a buffer holding
4785  *    the compressed data buffer, *out_sz was set to indicate the output size,
4786  *    *compress was set to the appropriate compression algorithm and B_TRUE is
4787  *    returned. Once writing is done the buffer will be automatically freed by
4788  *    l2arc_do_free_on_write().
4789  */
4790 static boolean_t
4791 l2arc_compress_buf(void *in_data, uint64_t in_sz, void **out_data,
4792     uint64_t *out_sz, enum zio_compress *compress)
4793 {
4794         void *cdata;
4795 
4796         cdata = zio_data_buf_alloc(in_sz);
4797         *out_sz = zio_compress_data(ZIO_COMPRESS_LZ4, in_data, cdata, in_sz);
4798 
4799         if (*out_sz == 0) {
4800                 /* Zero block, indicate that there's nothing to write. */
4801                 zio_data_buf_free(cdata, in_sz);
4802                 *compress = ZIO_COMPRESS_EMPTY;
4803                 *out_data = NULL;
4804                 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
4805                 return (B_TRUE);
4806         } else if (*out_sz > 0 && *out_sz < in_sz) {
4807                 /*
4808                  * Compression succeeded; we'll keep the cdata around for
4809                  * writing and free it afterwards in l2arc_do_free_on_write().
4810                  */
4811                 l2arc_data_free_t *df;
4812 
4813                 *compress = ZIO_COMPRESS_LZ4;
4814                 *out_data = cdata;
4815 
4816                 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
4817                 df->l2df_data = cdata;
4818                 df->l2df_size = *out_sz;
4819                 df->l2df_func = zio_data_buf_free;
4820                 mutex_enter(&l2arc_free_on_write_mtx);
4821                 list_insert_head(l2arc_free_on_write, df);
4822                 mutex_exit(&l2arc_free_on_write_mtx);
4823 
4824                 ARCSTAT_BUMP(arcstat_l2_compress_successes);
4825                 ARCSTAT_BUMP(arcstat_l2_free_on_write);
4826                 return (B_TRUE);
4827         } else {
4828                 /*
4829                  * Compression failed, release the compressed buffer.
4830                  */
4831                 zio_data_buf_free(cdata, in_sz);
4832                 ARCSTAT_BUMP(arcstat_l2_compress_failures);
4833                 return (B_FALSE);
4834         }
4835 }
4836 
4837 /*
4838  * Decompresses a zio read back from an l2arc device. On success, the
4839  * underlying zio's io_data buffer is overwritten by the uncompressed
4840  * version. On decompression error (corrupt compressed stream), the
4841  * zio->io_error value is set to signal an I/O error.
4842  *
4843  * Please note that the compressed data stream is not checksummed, so
4844  * if the underlying device is experiencing data corruption, we may feed
4845  * corrupt data to the decompressor; the decompressor therefore needs to
4846  * be able to handle this situation (LZ4 does).
4847  */
4848 static void
4849 l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
4850 {
4851         ASSERT(L2ARC_IS_VALID_COMPRESS(c));
4852 
4853         if (zio->io_error != 0) {
4854                 /*
4855                  * An I/O error has occurred; just restore the original I/O
4856                  * size in preparation for a main pool read.
4857                  */
4858                 zio->io_orig_size = zio->io_size = hdr->b_size;
4859                 return;
4860         }
4861 
4862         if (c == ZIO_COMPRESS_EMPTY) {
4863                 /*
4864                  * An empty buffer results in a null zio, which means we
4865                  * need to fill its io_data after we're done restoring the
4866                  * buffer's contents.
4867                  */
4868                 ASSERT(hdr->b_buf != NULL);
4869                 bzero(hdr->b_buf->b_data, hdr->b_size);
4870                 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
4871         } else {
4872                 ASSERT(zio->io_data != NULL);
4873                 /*
4874                  * We copy the compressed data from the start of the arc buffer
4875                  * (the zio_read will have pulled in only what we need, the
4876                  * rest is garbage which we will overwrite at decompression)
4877                  * and then decompress back to the ARC data buffer. This way we
4878                  * can minimize copying by simply decompressing back over the
4879                  * original compressed data (rather than decompressing to an
4880                  * aux buffer and then copying back the uncompressed buffer,
4881                  * which is likely to be much larger).
4882                  */
4883                 uint64_t csize;
4884                 void *cdata;
4885 
4886                 csize = zio->io_size;
4887                 cdata = zio_data_buf_alloc(csize);
4888                 bcopy(zio->io_data, cdata, csize);
4889                 if (zio_decompress_data(c, cdata, zio->io_data, csize,
4890                     hdr->b_size) != 0)
4891                         zio->io_error = EIO;
4892                 zio_data_buf_free(cdata, csize);
4893         }
4894 
4895         /* Restore the expected uncompressed IO size. */
4896         zio->io_orig_size = zio->io_size = hdr->b_size;
4897 }
4898 
4899 /*
4900  * This thread feeds the L2ARC at regular intervals.  This is the beating
4901  * heart of the L2ARC.
4902  */
4903 static void
4904 l2arc_feed_thread(void)
4905 {
4906         callb_cpr_t cpr;
4907         l2arc_dev_t *dev;
4908         spa_t *spa;
4909         uint64_t size, wrote;
4910         clock_t begin, next = ddi_get_lbolt();
4911         boolean_t headroom_boost = B_FALSE;
4912 
4913         CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4914 
4915         mutex_enter(&l2arc_feed_thr_lock);
4916 
4917         while (l2arc_thread_exit == 0) {
4918                 CALLB_CPR_SAFE_BEGIN(&cpr);
4919                 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4920                     next);
4921                 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
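
                     /*
                      * By default, retry one second (hz ticks) from now if
                      * one of the early-exit checks below is taken.
                      */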
4922                 next = ddi_get_lbolt() + hz;
4923 
4924                 /*
4925                  * Quick check for L2ARC devices.
4926                  */
4927                 mutex_enter(&l2arc_dev_mtx);
4928                 if (l2arc_ndev == 0) {
4929                         mutex_exit(&l2arc_dev_mtx);
4930                         continue;
4931                 }
4932                 mutex_exit(&l2arc_dev_mtx);
4933                 begin = ddi_get_lbolt();
4934 
4935                 /*
4936                  * This selects the next l2arc device to write to, and in
4937                  * doing so the next spa to feed from: dev->l2ad_spa.  This
4938                  * will return NULL if there are now no l2arc devices or if
4939                  * they are all faulted.
4940                  *
4941                  * If a device is returned, its spa's config lock is also
4942                  * held to prevent device removal.  l2arc_dev_get_next()
4943                  * will grab and release l2arc_dev_mtx.
4944                  */
4945                 if ((dev = l2arc_dev_get_next()) == NULL)
4946                         continue;
4947 
4948                 spa = dev->l2ad_spa;
4949                 ASSERT(spa != NULL);
4950 
4951                 /*
4952                  * If the pool is read-only then force the feed thread to
4953                  * sleep a little longer.
4954                  */
4955                 if (!spa_writeable(spa)) {
4956                         next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
4957                         spa_config_exit(spa, SCL_L2ARC, dev);
4958                         continue;
4959                 }
4960 
4961                 /*
4962                  * Avoid contributing to memory pressure.
4963                  */
4964                 if (arc_reclaim_needed()) {
4965                         ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4966                         spa_config_exit(spa, SCL_L2ARC, dev);
4967                         continue;
4968                 }
4969 
4970                 ARCSTAT_BUMP(arcstat_l2_feeds);
4971 
4972                 size = l2arc_write_size();
4973 
4974                 /*
4975                  * Evict L2ARC buffers that will be overwritten.
4976                  */
4977                 l2arc_evict(dev, size, B_FALSE);
4978 
4979                 /*
4980                  * Write ARC buffers.
4981                  */
4982                 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
4983 
4984                 /*
4985                  * Calculate interval between writes.
4986                  */
4987                 next = l2arc_write_interval(begin, size, wrote);
4988                 spa_config_exit(spa, SCL_L2ARC, dev);
4989         }
4990 
4991         l2arc_thread_exit = 0;
4992         cv_broadcast(&l2arc_feed_thr_cv);
4993         CALLB_CPR_EXIT(&cpr);               /* drops l2arc_feed_thr_lock */
4994         thread_exit();
4995 }
4996 
4997 boolean_t
4998 l2arc_vdev_present(vdev_t *vd)
4999 {
5000         l2arc_dev_t *dev;
5001 
5002         mutex_enter(&l2arc_dev_mtx);
5003         for (dev = list_head(l2arc_dev_list); dev != NULL;
5004             dev = list_next(l2arc_dev_list, dev)) {
5005                 if (dev->l2ad_vdev == vd)
5006                         break;
5007         }
5008         mutex_exit(&l2arc_dev_mtx);
5009 
5010         return (dev != NULL);
5011 }
5012 
5013 /*
5014  * Add a vdev for use by the L2ARC.  By this point the spa has already
5015  * validated the vdev and opened it.
5016  */
5017 void
5018 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5019 {
5020         l2arc_dev_t *adddev;
5021 
5022         ASSERT(!l2arc_vdev_present(vd));
5023 
5024         /*
5025          * Create a new l2arc device entry.
5026          */
5027         adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5028         adddev->l2ad_spa = spa;
5029         adddev->l2ad_vdev = vd;
5030         adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5031         adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5032         adddev->l2ad_hand = adddev->l2ad_start;
5033         adddev->l2ad_evict = adddev->l2ad_start;
5034         adddev->l2ad_first = B_TRUE;
5035         adddev->l2ad_writing = B_FALSE;
5036 
5037         /*
5038          * This is a list of all ARC buffers that are still valid on the
5039          * device.
5040          */
5041         adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5042         list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5043             offsetof(arc_buf_hdr_t, b_l2node));
5044 
5045         vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5046 
5047         /*
5048          * Add device to global list
5049          */
5050         mutex_enter(&l2arc_dev_mtx);
5051         list_insert_head(l2arc_dev_list, adddev);
5052         atomic_inc_64(&l2arc_ndev);
5053         mutex_exit(&l2arc_dev_mtx);
5054 }
5055 
5056 /*
5057  * Remove a vdev from the L2ARC.
5058  */
5059 void
5060 l2arc_remove_vdev(vdev_t *vd)
5061 {
5062         l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5063 
5064         /*
5065          * Find the device by vdev
5066          */
5067         mutex_enter(&l2arc_dev_mtx);
5068         for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5069                 nextdev = list_next(l2arc_dev_list, dev);
5070                 if (vd == dev->l2ad_vdev) {
5071                         remdev = dev;
5072                         break;
5073                 }
5074         }
5075         ASSERT(remdev != NULL);
5076 
5077         /*
5078          * Remove device from global list
5079          */
5080         list_remove(l2arc_dev_list, remdev);
5081         l2arc_dev_last = NULL;          /* may have been invalidated */
5082         atomic_dec_64(&l2arc_ndev);
5083         mutex_exit(&l2arc_dev_mtx);
5084 
5085         /*
5086          * Clear all buflists and ARC references.  L2ARC device flush.
5087          */
5088         l2arc_evict(remdev, 0, B_TRUE);
5089         list_destroy(remdev->l2ad_buflist);
5090         kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5091         kmem_free(remdev, sizeof (l2arc_dev_t));
5092 }
5093 
5094 void
5095 l2arc_init(void)
5096 {
5097         l2arc_thread_exit = 0;
5098         l2arc_ndev = 0;
5099         l2arc_writes_sent = 0;
5100         l2arc_writes_done = 0;
5101 
5102         mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5103         cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5104         mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5105         mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5106         mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5107 
5108         l2arc_dev_list = &L2ARC_dev_list;
5109         l2arc_free_on_write = &L2ARC_free_on_write;
5110         list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5111             offsetof(l2arc_dev_t, l2ad_node));
5112         list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5113             offsetof(l2arc_data_free_t, l2df_list_node));
5114 }
5115 
5116 void
5117 l2arc_fini(void)
5118 {
5119         /*
5120          * This is called from dmu_fini(), which is called from spa_fini().
5121          * Because of this, we can assume that all l2arc devices have
5122          * already been removed when the pools themselves were removed.
5123          */
5124 
5125         l2arc_do_free_on_write();
5126 
5127         mutex_destroy(&l2arc_feed_thr_lock);
5128         cv_destroy(&l2arc_feed_thr_cv);
5129         mutex_destroy(&l2arc_dev_mtx);
5130         mutex_destroy(&l2arc_buflist_mtx);
5131         mutex_destroy(&l2arc_free_on_write_mtx);
5132 
5133         list_destroy(l2arc_dev_list);
5134         list_destroy(l2arc_free_on_write);
5135 }
5136 
5137 void
5138 l2arc_start(void)
5139 {
5140         if (!(spa_mode_global & FWRITE))
5141                 return;
5142 
5143         (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5144             TS_RUN, minclsyspri);
5145 }
5146 
5147 void
5148 l2arc_stop(void)
5149 {
5150         if (!(spa_mode_global & FWRITE))
5151                 return;
5152 
5153         mutex_enter(&l2arc_feed_thr_lock);
5154         cv_signal(&l2arc_feed_thr_cv);      /* kick thread out of startup */
5155         l2arc_thread_exit = 1;
5156         while (l2arc_thread_exit != 0)
5157                 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5158         mutex_exit(&l2arc_feed_thr_lock);
5159 }