1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  24  * Copyright (c) 2012 by Delphix. All rights reserved.
  25  */
  26 
  27 /*
  28  * DVA-based Adjustable Replacement Cache
  29  *
  30  * While much of the theory of operation used here is
  31  * based on the self-tuning, low overhead replacement cache
  32  * presented by Megiddo and Modha at FAST 2003, there are some
  33  * significant differences:
  34  *
  35  * 1. The Megiddo and Modha model assumes any page is evictable.
  36  * Pages in its cache cannot be "locked" into memory.  This makes
  37  * the eviction algorithm simple: evict the last page in the list.
  38  * This also makes the performance characteristics easy to reason
  39  * about.  Our cache is not so simple.  At any given moment, some
  40  * subset of the blocks in the cache are un-evictable because we
  41  * have handed out a reference to them.  Blocks are only evictable
  42  * when there are no external references active.  This makes
  43  * eviction far more problematic:  we choose to evict the evictable
  44  * blocks that are the "lowest" in the list.
  45  *
  46  * There are times when it is not possible to evict the requested
  47  * space.  In these circumstances we are unable to adjust the cache
  48  * size.  To keep the cache from growing unbounded at these times we
  49  * implement a "cache throttle" that slows the flow of new data
  50  * into the cache until we can make space available.
  51  *
  52  * 2. The Megiddo and Modha model assumes a fixed cache size.
  53  * Pages are evicted when the cache is full and there is a cache
  54  * miss.  Our model has a variable sized cache.  It grows with
  55  * high use, but also tries to react to memory pressure from the
  56  * operating system: decreasing its size when system memory is
  57  * tight.
  58  *
  59  * 3. The Megiddo and Modha model assumes a fixed page size. All
  60  * elements of the cache are therefore exactly the same size.  So
  61  * when adjusting the cache size following a cache miss, it's simply
  62  * a matter of choosing a single page to evict.  In our model, we
  63  * have variable-sized cache blocks (ranging from 512 bytes to
  64  * 128K bytes).  We therefore choose a set of blocks to evict to make
  65  * space for a cache miss that approximates as closely as possible
  66  * the space used by the new block.
  67  *
  68  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
  69  * by N. Megiddo & D. Modha, FAST 2003
  70  */
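
/*
 * Illustrative sketch of difference (3) above.  This is not the actual
 * eviction code; pick_victim_from_tail() and evict_block() are
 * placeholders for the real list walk and eviction logic:
 *
 *      uint64_t freed = 0;
 *      arc_buf_hdr_t *ab;
 *
 *      while (freed < needed &&
 *          (ab = pick_victim_from_tail(evictable_list)) != NULL)
 *              freed += evict_block(ab);     // b_size varies, 512B..128K
 *
 * Because block sizes vary, the chosen set of victims only approximates
 * the space required by the incoming block.
 */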
  71 
  72 /*
  73  * The locking model:
  74  *
  75  * A new reference to a cache buffer can be obtained in two
  76  * ways: 1) via a hash table lookup using the DVA as a key,
  77  * or 2) via one of the ARC lists.  The arc_read() interface
  78  * uses method 1, while the internal arc algorithms for
  79  * adjusting the cache use method 2.  We therefore provide two
  80  * types of locks: 1) the hash table lock array, and 2) the
  81  * arc list locks.
  82  *
  83  * Buffers do not have their own mutexes, rather they rely on the
  84  * hash table mutexes for the bulk of their protection (i.e. most
  85  * fields in the arc_buf_hdr_t are protected by these mutexes).
  86  *
  87  * buf_hash_find() returns the appropriate mutex (held) when it
  88  * locates the requested buffer in the hash table.  It returns
  89  * NULL for the mutex if the buffer was not in the table.
  90  *
  91  * buf_hash_remove() expects the appropriate hash mutex to be
  92  * already held before it is invoked.
  93  *
  94  * Each arc state also has a mutex which is used to protect the
  95  * buffer list associated with the state.  When attempting to
  96  * obtain a hash table lock while holding an arc list lock you
  97  * must use mutex_tryenter() to avoid deadlock.  Also note that
  98  * the active state mutex must be held before the ghost state mutex.
  99  *
 100  * Arc buffers may have an associated eviction callback function.
 101  * This function will be invoked prior to removing the buffer (e.g.
 102  * in arc_do_user_evicts()).  Note however that the data associated
 103  * with the buffer may be evicted prior to the callback.  The callback
 104  * must be made with *no locks held* (to prevent deadlock).  Additionally,
 105  * the users of callbacks must ensure that their private data is
 106  * protected from simultaneous callbacks from arc_buf_evict()
 107  * and arc_do_user_evicts().
 108  *
 109  * Note that the majority of the performance stats are manipulated
 110  * with atomic operations.
 111  *
 112  * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 113  *
 114  *      - L2ARC buflist creation
 115  *      - L2ARC buflist eviction
 116  *      - L2ARC write completion, which walks L2ARC buflists
 117  *      - ARC header destruction, as it removes from L2ARC buflists
 118  *      - ARC header release, as it removes from L2ARC buflists
 119  */
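
/*
 * A minimal sketch of the two reference paths and the lock ordering
 * described above.  These are hypothetical call sites patterned on the
 * read and eviction paths, not code lifted verbatim from this file:
 *
 *      // Method 1: hash table lookup.  buf_hash_find() returns with
 *      // the appropriate hash lock held (or sets it to NULL on a miss).
 *      hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
 *          &hash_lock);
 *      if (hdr != NULL) {
 *              // ... take a reference, update state ...
 *              mutex_exit(hash_lock);
 *      }
 *
 *      // Method 2: walking an arc list while holding its arcs_mtx.
 *      // The hash lock must then be taken with mutex_tryenter(); if it
 *      // is busy the buffer is skipped rather than risking deadlock.
 *      hash_lock = HDR_LOCK(ab);
 *      if (mutex_tryenter(hash_lock)) {
 *              // ... adjust or evict the buffer ...
 *              mutex_exit(hash_lock);
 *      } else {
 *              ARCSTAT_BUMP(arcstat_mutex_miss);
 *      }
 */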
 120 
 121 #include <sys/spa.h>
 122 #include <sys/zio.h>
 123 #include <sys/zfs_context.h>
 124 #include <sys/arc.h>
 125 #include <sys/refcount.h>
 126 #include <sys/vdev.h>
 127 #include <sys/vdev_impl.h>
 128 #ifdef _KERNEL
 129 #include <sys/vmsystm.h>
 130 #include <vm/anon.h>
 131 #include <sys/fs/swapnode.h>
 132 #include <sys/dnlc.h>
 133 #endif
 134 #include <sys/callb.h>
 135 #include <sys/kstat.h>
 136 #include <zfs_fletcher.h>
 137 
 138 static kmutex_t         arc_reclaim_thr_lock;
 139 static kcondvar_t       arc_reclaim_thr_cv;     /* used to signal reclaim thr */
 140 static uint8_t          arc_thread_exit;
 141 
 142 extern int zfs_write_limit_shift;
 143 extern uint64_t zfs_write_limit_max;
 144 extern kmutex_t zfs_write_limit_lock;
 145 
 146 #define ARC_REDUCE_DNLC_PERCENT 3
 147 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
 148 
 149 typedef enum arc_reclaim_strategy {
 150         ARC_RECLAIM_AGGR,               /* Aggressive reclaim strategy */
 151         ARC_RECLAIM_CONS                /* Conservative reclaim strategy */
 152 } arc_reclaim_strategy_t;
 153 
 154 /* number of seconds before growing cache again */
 155 static int              arc_grow_retry = 60;
 156 
 157 /* shift of arc_c for calculating both min and max arc_p */
 158 static int              arc_p_min_shift = 4;
 159 
 160 /* log2(fraction of arc to reclaim) */
 161 static int              arc_shrink_shift = 5;
 162 
 163 /*
 164  * minimum lifespan of a prefetch block in clock ticks
 165  * (initialized in arc_init())
 166  */
 167 static int              arc_min_prefetch_lifespan;
 168 
 169 static int arc_dead;
 170 
 171 /*
 172  * The arc has filled available memory and has now warmed up.
 173  */
 174 static boolean_t arc_warm;
 175 
 176 /*
 177  * These tunables are for performance analysis.
 178  */
 179 uint64_t zfs_arc_max;
 180 uint64_t zfs_arc_min;
 181 uint64_t zfs_arc_meta_limit = 0;
 182 int zfs_arc_grow_retry = 0;
 183 int zfs_arc_shrink_shift = 0;
 184 int zfs_arc_p_min_shift = 0;
 185 
 186 /*
 187  * Note that buffers can be in one of 6 states:
 188  *      ARC_anon        - anonymous (discussed below)
 189  *      ARC_mru         - recently used, currently cached
 190  *      ARC_mru_ghost   - recently used, no longer in cache
 191  *      ARC_mfu         - frequently used, currently cached
 192  *      ARC_mfu_ghost   - frequently used, no longer in cache
 193  *      ARC_l2c_only    - exists in L2ARC but not other states
 194  * When there are no active references to a buffer, it is linked
 195  * onto a list in one of these arc states.  These are
 196  * the only buffers that can be evicted or deleted.  Within each
 197  * state there are multiple lists, one for meta-data and one for
 198  * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 199  * etc.) is tracked separately so that it can be managed more
 200  * explicitly: favored over data, limited explicitly.
 201  *
 202  * Anonymous buffers are buffers that are not associated with
 203  * a DVA.  These are buffers that hold dirty block copies
 204  * before they are written to stable storage.  By definition,
 205  * they are "ref'd" and are considered part of arc_mru
 206  * that cannot be freed.  Generally, they will acquire a DVA
 207  * as they are written and migrate onto the arc_mru list.
 208  *
 209  * The ARC_l2c_only state is for buffers that are in the second
 210  * level ARC but no longer in any of the ARC_m* lists.  The second
 211  * level ARC itself may also contain buffers that are in any of
 212  * the ARC_m* states - meaning that a buffer can exist in two
 213  * places.  The reason for the ARC_l2c_only state is to keep the
 214  * buffer header in the hash table, so that reads that hit the
 215  * second level ARC benefit from these fast lookups.
 216  */
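
/*
 * For example (illustrative, in terms of the arc_state_t layout below):
 * an evictable metadata buffer cached in the MRU state is linked on
 * arc_mru->arcs_list[ARC_BUFC_METADATA], its bytes are counted in
 * arc_mru->arcs_lsize[ARC_BUFC_METADATA] and arc_mru->arcs_size, and it
 * is pulled off that list as soon as a reference is handed out (see
 * add_reference() below).
 */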
 217 
 218 typedef struct arc_state {
 219         list_t  arcs_list[ARC_BUFC_NUMTYPES];   /* list of evictable buffers */
 220         uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
 221         uint64_t arcs_size;     /* total amount of data in this state */
 222         kmutex_t arcs_mtx;
 223 } arc_state_t;
 224 
 225 /* The 6 states: */
 226 static arc_state_t ARC_anon;
 227 static arc_state_t ARC_mru;
 228 static arc_state_t ARC_mru_ghost;
 229 static arc_state_t ARC_mfu;
 230 static arc_state_t ARC_mfu_ghost;
 231 static arc_state_t ARC_l2c_only;
 232 
 233 typedef struct arc_stats {
 234         kstat_named_t arcstat_hits;
 235         kstat_named_t arcstat_misses;
 236         kstat_named_t arcstat_demand_data_hits;
 237         kstat_named_t arcstat_demand_data_misses;
 238         kstat_named_t arcstat_demand_metadata_hits;
 239         kstat_named_t arcstat_demand_metadata_misses;
 240         kstat_named_t arcstat_prefetch_data_hits;
 241         kstat_named_t arcstat_prefetch_data_misses;
 242         kstat_named_t arcstat_prefetch_metadata_hits;
 243         kstat_named_t arcstat_prefetch_metadata_misses;
 244         kstat_named_t arcstat_mru_hits;
 245         kstat_named_t arcstat_mru_ghost_hits;
 246         kstat_named_t arcstat_mfu_hits;
 247         kstat_named_t arcstat_mfu_ghost_hits;
 248         kstat_named_t arcstat_deleted;
 249         kstat_named_t arcstat_recycle_miss;
 250         kstat_named_t arcstat_mutex_miss;
 251         kstat_named_t arcstat_evict_skip;
 252         kstat_named_t arcstat_evict_l2_cached;
 253         kstat_named_t arcstat_evict_l2_eligible;
 254         kstat_named_t arcstat_evict_l2_ineligible;
 255         kstat_named_t arcstat_hash_elements;
 256         kstat_named_t arcstat_hash_elements_max;
 257         kstat_named_t arcstat_hash_collisions;
 258         kstat_named_t arcstat_hash_chains;
 259         kstat_named_t arcstat_hash_chain_max;
 260         kstat_named_t arcstat_p;
 261         kstat_named_t arcstat_c;
 262         kstat_named_t arcstat_c_min;
 263         kstat_named_t arcstat_c_max;
 264         kstat_named_t arcstat_size;
 265         kstat_named_t arcstat_hdr_size;
 266         kstat_named_t arcstat_data_size;
 267         kstat_named_t arcstat_other_size;
 268         kstat_named_t arcstat_l2_hits;
 269         kstat_named_t arcstat_l2_misses;
 270         kstat_named_t arcstat_l2_feeds;
 271         kstat_named_t arcstat_l2_rw_clash;
 272         kstat_named_t arcstat_l2_read_bytes;
 273         kstat_named_t arcstat_l2_write_bytes;
 274         kstat_named_t arcstat_l2_writes_sent;
 275         kstat_named_t arcstat_l2_writes_done;
 276         kstat_named_t arcstat_l2_writes_error;
 277         kstat_named_t arcstat_l2_writes_hdr_miss;
 278         kstat_named_t arcstat_l2_evict_lock_retry;
 279         kstat_named_t arcstat_l2_evict_reading;
 280         kstat_named_t arcstat_l2_free_on_write;
 281         kstat_named_t arcstat_l2_abort_lowmem;
 282         kstat_named_t arcstat_l2_cksum_bad;
 283         kstat_named_t arcstat_l2_io_error;
 284         kstat_named_t arcstat_l2_size;
 285         kstat_named_t arcstat_l2_hdr_size;
 286         kstat_named_t arcstat_memory_throttle_count;
 287 } arc_stats_t;
 288 
 289 static arc_stats_t arc_stats = {
 290         { "hits",                       KSTAT_DATA_UINT64 },
 291         { "misses",                     KSTAT_DATA_UINT64 },
 292         { "demand_data_hits",           KSTAT_DATA_UINT64 },
 293         { "demand_data_misses",         KSTAT_DATA_UINT64 },
 294         { "demand_metadata_hits",       KSTAT_DATA_UINT64 },
 295         { "demand_metadata_misses",     KSTAT_DATA_UINT64 },
 296         { "prefetch_data_hits",         KSTAT_DATA_UINT64 },
 297         { "prefetch_data_misses",       KSTAT_DATA_UINT64 },
 298         { "prefetch_metadata_hits",     KSTAT_DATA_UINT64 },
 299         { "prefetch_metadata_misses",   KSTAT_DATA_UINT64 },
 300         { "mru_hits",                   KSTAT_DATA_UINT64 },
 301         { "mru_ghost_hits",             KSTAT_DATA_UINT64 },
 302         { "mfu_hits",                   KSTAT_DATA_UINT64 },
 303         { "mfu_ghost_hits",             KSTAT_DATA_UINT64 },
 304         { "deleted",                    KSTAT_DATA_UINT64 },
 305         { "recycle_miss",               KSTAT_DATA_UINT64 },
 306         { "mutex_miss",                 KSTAT_DATA_UINT64 },
 307         { "evict_skip",                 KSTAT_DATA_UINT64 },
 308         { "evict_l2_cached",            KSTAT_DATA_UINT64 },
 309         { "evict_l2_eligible",          KSTAT_DATA_UINT64 },
 310         { "evict_l2_ineligible",        KSTAT_DATA_UINT64 },
 311         { "hash_elements",              KSTAT_DATA_UINT64 },
 312         { "hash_elements_max",          KSTAT_DATA_UINT64 },
 313         { "hash_collisions",            KSTAT_DATA_UINT64 },
 314         { "hash_chains",                KSTAT_DATA_UINT64 },
 315         { "hash_chain_max",             KSTAT_DATA_UINT64 },
 316         { "p",                          KSTAT_DATA_UINT64 },
 317         { "c",                          KSTAT_DATA_UINT64 },
 318         { "c_min",                      KSTAT_DATA_UINT64 },
 319         { "c_max",                      KSTAT_DATA_UINT64 },
 320         { "size",                       KSTAT_DATA_UINT64 },
 321         { "hdr_size",                   KSTAT_DATA_UINT64 },
 322         { "data_size",                  KSTAT_DATA_UINT64 },
 323         { "other_size",                 KSTAT_DATA_UINT64 },
 324         { "l2_hits",                    KSTAT_DATA_UINT64 },
 325         { "l2_misses",                  KSTAT_DATA_UINT64 },
 326         { "l2_feeds",                   KSTAT_DATA_UINT64 },
 327         { "l2_rw_clash",                KSTAT_DATA_UINT64 },
 328         { "l2_read_bytes",              KSTAT_DATA_UINT64 },
 329         { "l2_write_bytes",             KSTAT_DATA_UINT64 },
 330         { "l2_writes_sent",             KSTAT_DATA_UINT64 },
 331         { "l2_writes_done",             KSTAT_DATA_UINT64 },
 332         { "l2_writes_error",            KSTAT_DATA_UINT64 },
 333         { "l2_writes_hdr_miss",         KSTAT_DATA_UINT64 },
 334         { "l2_evict_lock_retry",        KSTAT_DATA_UINT64 },
 335         { "l2_evict_reading",           KSTAT_DATA_UINT64 },
 336         { "l2_free_on_write",           KSTAT_DATA_UINT64 },
 337         { "l2_abort_lowmem",            KSTAT_DATA_UINT64 },
 338         { "l2_cksum_bad",               KSTAT_DATA_UINT64 },
 339         { "l2_io_error",                KSTAT_DATA_UINT64 },
 340         { "l2_size",                    KSTAT_DATA_UINT64 },
 341         { "l2_hdr_size",                KSTAT_DATA_UINT64 },
 342         { "memory_throttle_count",      KSTAT_DATA_UINT64 }
 343 };
 344 
 345 #define ARCSTAT(stat)   (arc_stats.stat.value.ui64)
 346 
 347 #define ARCSTAT_INCR(stat, val) \
 348         atomic_add_64(&arc_stats.stat.value.ui64, (val))
 349 
 350 #define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
 351 #define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)
 352 
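/*
 * Lock-free maximum: ARCSTAT_MAX() raises the stat to (val) with a
 * compare-and-swap retry loop, looping only while (val) is still
 * greater than the currently published value.
 */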
 353 #define ARCSTAT_MAX(stat, val) {                                        \
 354         uint64_t m;                                                     \
 355         while ((val) > (m = arc_stats.stat.value.ui64) &&            \
 356             (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))     \
 357                 continue;                                               \
 358 }
 359 
 360 #define ARCSTAT_MAXSTAT(stat) \
 361         ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
 362 
 363 /*
 364  * We define a macro to allow ARC hits/misses to be easily broken down by
 365  * two separate conditions, giving a total of four different subtypes for
 366  * each of hits and misses (so eight statistics total).
 367  */
 368 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
 369         if (cond1) {                                                    \
 370                 if (cond2) {                                            \
 371                         ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
 372                 } else {                                                \
 373                         ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
 374                 }                                                       \
 375         } else {                                                        \
 376                 if (cond2) {                                            \
 377                         ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
 378                 } else {                                                \
 379                         ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
 380                 }                                                       \
 381         }
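
/*
 * For example, the call in arc_buf_add_ref() below,
 *
 *      ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *          demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *          data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */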
 382 
 383 kstat_t                 *arc_ksp;
 384 static arc_state_t      *arc_anon;
 385 static arc_state_t      *arc_mru;
 386 static arc_state_t      *arc_mru_ghost;
 387 static arc_state_t      *arc_mfu;
 388 static arc_state_t      *arc_mfu_ghost;
 389 static arc_state_t      *arc_l2c_only;
 390 
 391 /*
 392  * There are several ARC variables that are critical to export as kstats --
 393  * but we don't want to have to grovel around in the kstat whenever we wish to
 394  * manipulate them.  For these variables, we therefore define them to be in
 395  * terms of the statistic variable.  This ensures that we are not introducing
 396  * the possibility of inconsistency by having shadow copies of the variables,
 397  * while still allowing the code to be readable.
 398  */
 399 #define arc_size        ARCSTAT(arcstat_size)   /* actual total arc size */
 400 #define arc_p           ARCSTAT(arcstat_p)      /* target size of MRU */
 401 #define arc_c           ARCSTAT(arcstat_c)      /* target size of cache */
 402 #define arc_c_min       ARCSTAT(arcstat_c_min)  /* min target cache size */
 403 #define arc_c_max       ARCSTAT(arcstat_c_max)  /* max target cache size */
 404 
 405 static int              arc_no_grow;    /* Don't try to grow cache size */
 406 static uint64_t         arc_tempreserve;
 407 static uint64_t         arc_loaned_bytes;
 408 static uint64_t         arc_meta_used;
 409 static uint64_t         arc_meta_limit;
 410 static uint64_t         arc_meta_max = 0;
 411 
 412 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
 413 
 414 typedef struct arc_callback arc_callback_t;
 415 
 416 struct arc_callback {
 417         void                    *acb_private;
 418         arc_done_func_t         *acb_done;
 419         arc_buf_t               *acb_buf;
 420         zio_t                   *acb_zio_dummy;
 421         arc_callback_t          *acb_next;
 422 };
 423 
 424 typedef struct arc_write_callback arc_write_callback_t;
 425 
 426 struct arc_write_callback {
 427         void            *awcb_private;
 428         arc_done_func_t *awcb_ready;
 429         arc_done_func_t *awcb_done;
 430         arc_buf_t       *awcb_buf;
 431 };
 432 
 433 struct arc_buf_hdr {
 434         /* protected by hash lock */
 435         dva_t                   b_dva;
 436         uint64_t                b_birth;
 437         uint64_t                b_cksum0;
 438 
 439         kmutex_t                b_freeze_lock;
 440         zio_cksum_t             *b_freeze_cksum;
 441         void                    *b_thawed;
 442 
 443         arc_buf_hdr_t           *b_hash_next;
 444         arc_buf_t               *b_buf;
 445         uint32_t                b_flags;
 446         uint32_t                b_datacnt;
 447 
 448         arc_callback_t          *b_acb;
 449         kcondvar_t              b_cv;
 450 
 451         /* immutable */
 452         arc_buf_contents_t      b_type;
 453         uint64_t                b_size;
 454         uint64_t                b_spa;
 455 
 456         /* protected by arc state mutex */
 457         arc_state_t             *b_state;
 458         list_node_t             b_arc_node;
 459 
 460         /* updated atomically */
 461         clock_t                 b_arc_access;
 462 
 463         /* self protecting */
 464         refcount_t              b_refcnt;
 465 
 466         l2arc_buf_hdr_t         *b_l2hdr;
 467         list_node_t             b_l2node;
 468 };
 469 
 470 static arc_buf_t *arc_eviction_list;
 471 static kmutex_t arc_eviction_mtx;
 472 static arc_buf_hdr_t arc_eviction_hdr;
 473 static void arc_get_data_buf(arc_buf_t *buf);
 474 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
 475 static int arc_evict_needed(arc_buf_contents_t type);
 476 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
 477 
 478 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
 479 
 480 #define GHOST_STATE(state)      \
 481         ((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||        \
 482         (state) == arc_l2c_only)
 483 
 484 /*
 485  * Private ARC flags.  These are ARC-only flags that will show up in
 486  * b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 487  * be passed in as arc_flags in things like arc_read.  However, these flags
 488  * should never be passed and should only be set by ARC code.  When adding new
 489  * public flags, make sure not to smash the private ones.
 490  */
 491 
 492 #define ARC_IN_HASH_TABLE       (1 << 9)  /* this buffer is hashed */
 493 #define ARC_IO_IN_PROGRESS      (1 << 10) /* I/O in progress for buf */
 494 #define ARC_IO_ERROR            (1 << 11) /* I/O failed for buf */
 495 #define ARC_FREED_IN_READ       (1 << 12) /* buf freed while in read */
 496 #define ARC_BUF_AVAILABLE       (1 << 13) /* block not in active use */
 497 #define ARC_INDIRECT            (1 << 14) /* this is an indirect block */
 498 #define ARC_FREE_IN_PROGRESS    (1 << 15) /* hdr about to be freed */
 499 #define ARC_L2_WRITING          (1 << 16) /* L2ARC write in progress */
 500 #define ARC_L2_EVICTED          (1 << 17) /* evicted during I/O */
 501 #define ARC_L2_WRITE_HEAD       (1 << 18) /* head of write list */
 502 
 503 #define HDR_IN_HASH_TABLE(hdr)  ((hdr)->b_flags & ARC_IN_HASH_TABLE)
 504 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
 505 #define HDR_IO_ERROR(hdr)       ((hdr)->b_flags & ARC_IO_ERROR)
 506 #define HDR_PREFETCH(hdr)       ((hdr)->b_flags & ARC_PREFETCH)
 507 #define HDR_FREED_IN_READ(hdr)  ((hdr)->b_flags & ARC_FREED_IN_READ)
 508 #define HDR_BUF_AVAILABLE(hdr)  ((hdr)->b_flags & ARC_BUF_AVAILABLE)
 509 #define HDR_FREE_IN_PROGRESS(hdr)       ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
 510 #define HDR_L2CACHE(hdr)        ((hdr)->b_flags & ARC_L2CACHE)
 511 #define HDR_L2_READING(hdr)     ((hdr)->b_flags & ARC_IO_IN_PROGRESS &&  \
 512                                     (hdr)->b_l2hdr != NULL)
 513 #define HDR_L2_WRITING(hdr)     ((hdr)->b_flags & ARC_L2_WRITING)
 514 #define HDR_L2_EVICTED(hdr)     ((hdr)->b_flags & ARC_L2_EVICTED)
 515 #define HDR_L2_WRITE_HEAD(hdr)  ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
 516 
 517 /*
 518  * Other sizes
 519  */
 520 
 521 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
 522 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
 523 
 524 /*
 525  * Hash table routines
 526  */
 527 
 528 #define HT_LOCK_PAD     64
 529 
 530 struct ht_lock {
 531         kmutex_t        ht_lock;
 532 #ifdef _KERNEL
 533         unsigned char   pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
 534 #endif
 535 };
 536 
 537 #define BUF_LOCKS 256
 538 typedef struct buf_hash_table {
 539         uint64_t ht_mask;
 540         arc_buf_hdr_t **ht_table;
 541         struct ht_lock ht_locks[BUF_LOCKS];
 542 } buf_hash_table_t;
 543 
 544 static buf_hash_table_t buf_hash_table;
 545 
 546 #define BUF_HASH_INDEX(spa, dva, birth) \
 547         (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
 548 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
 549 #define BUF_HASH_LOCK(idx)      (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
 550 #define HDR_LOCK(hdr) \
 551         (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
 552 
 553 uint64_t zfs_crc64_table[256];
 554 
 555 /*
 556  * Level 2 ARC
 557  */
 558 
 559 #define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
 560 #define L2ARC_HEADROOM          2               /* num of writes */
 561 #define L2ARC_FEED_SECS         1               /* caching interval secs */
 562 #define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
 563 
 564 #define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
 565 #define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
 566 
 567 /*
 568  * L2ARC Performance Tunables
 569  */
 570 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
 571 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
 572 uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
 573 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
 574 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
 575 boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
 576 boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
 577 boolean_t l2arc_norw = B_TRUE;                  /* no reads during writes */
 578 
 579 /*
 580  * L2ARC Internals
 581  */
 582 typedef struct l2arc_dev {
 583         vdev_t                  *l2ad_vdev;     /* vdev */
 584         spa_t                   *l2ad_spa;      /* spa */
 585         uint64_t                l2ad_hand;      /* next write location */
 586         uint64_t                l2ad_write;     /* desired write size, bytes */
 587         uint64_t                l2ad_boost;     /* warmup write boost, bytes */
 588         uint64_t                l2ad_start;     /* first addr on device */
 589         uint64_t                l2ad_end;       /* last addr on device */
 590         uint64_t                l2ad_evict;     /* last addr eviction reached */
 591         boolean_t               l2ad_first;     /* first sweep through */
 592         boolean_t               l2ad_writing;   /* currently writing */
 593         list_t                  *l2ad_buflist;  /* buffer list */
 594         list_node_t             l2ad_node;      /* device list node */
 595 } l2arc_dev_t;
 596 
 597 static list_t L2ARC_dev_list;                   /* device list */
 598 static list_t *l2arc_dev_list;                  /* device list pointer */
 599 static kmutex_t l2arc_dev_mtx;                  /* device list mutex */
 600 static l2arc_dev_t *l2arc_dev_last;             /* last device used */
 601 static kmutex_t l2arc_buflist_mtx;              /* mutex for all buflists */
 602 static list_t L2ARC_free_on_write;              /* free after write buf list */
 603 static list_t *l2arc_free_on_write;             /* free after write list ptr */
 604 static kmutex_t l2arc_free_on_write_mtx;        /* mutex for list */
 605 static uint64_t l2arc_ndev;                     /* number of devices */
 606 
 607 typedef struct l2arc_read_callback {
 608         arc_buf_t       *l2rcb_buf;             /* read buffer */
 609         spa_t           *l2rcb_spa;             /* spa */
 610         blkptr_t        l2rcb_bp;               /* original blkptr */
 611         zbookmark_t     l2rcb_zb;               /* original bookmark */
 612         int             l2rcb_flags;            /* original flags */
 613 } l2arc_read_callback_t;
 614 
 615 typedef struct l2arc_write_callback {
 616         l2arc_dev_t     *l2wcb_dev;             /* device info */
 617         arc_buf_hdr_t   *l2wcb_head;            /* head of write buflist */
 618 } l2arc_write_callback_t;
 619 
 620 struct l2arc_buf_hdr {
 621         /* protected by arc_buf_hdr mutex */
 622         l2arc_dev_t     *b_dev;                 /* L2ARC device */
 623         uint64_t        b_daddr;                /* disk address, offset byte */
 624 };
 625 
 626 typedef struct l2arc_data_free {
 627         /* protected by l2arc_free_on_write_mtx */
 628         void            *l2df_data;
 629         size_t          l2df_size;
 630         void            (*l2df_func)(void *, size_t);
 631         list_node_t     l2df_list_node;
 632 } l2arc_data_free_t;
 633 
 634 static kmutex_t l2arc_feed_thr_lock;
 635 static kcondvar_t l2arc_feed_thr_cv;
 636 static uint8_t l2arc_thread_exit;
 637 
 638 static void l2arc_read_done(zio_t *zio);
 639 static void l2arc_hdr_stat_add(void);
 640 static void l2arc_hdr_stat_remove(void);
 641 
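/*
 * Hash a buffer's identity (spa guid, DVA and birth txg): the DVA bytes
 * are run through the shared ZFS CRC-64 table and the spa guid and birth
 * are folded in.  BUF_HASH_INDEX() masks the result down to a bucket.
 */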
 642 static uint64_t
 643 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
 644 {
 645         uint8_t *vdva = (uint8_t *)dva;
 646         uint64_t crc = -1ULL;
 647         int i;
 648 
 649         ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
 650 
 651         for (i = 0; i < sizeof (dva_t); i++)
 652                 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
 653 
 654         crc ^= (spa>>8) ^ birth;
 655 
 656         return (crc);
 657 }
 658 
 659 #define BUF_EMPTY(buf)                                          \
 660         ((buf)->b_dva.dva_word[0] == 0 &&                    \
 661         (buf)->b_dva.dva_word[1] == 0 &&                     \
 662         (buf)->b_birth == 0)
 663 
 664 #define BUF_EQUAL(spa, dva, birth, buf)                         \
 665         ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&       \
 666         ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&       \
 667         ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
 668 
 669 static void
 670 buf_discard_identity(arc_buf_hdr_t *hdr)
 671 {
 672         hdr->b_dva.dva_word[0] = 0;
 673         hdr->b_dva.dva_word[1] = 0;
 674         hdr->b_birth = 0;
 675         hdr->b_cksum0 = 0;
 676 }
 677 
 678 static arc_buf_hdr_t *
 679 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
 680 {
 681         uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 682         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 683         arc_buf_hdr_t *buf;
 684 
 685         mutex_enter(hash_lock);
 686         for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
 687             buf = buf->b_hash_next) {
 688                 if (BUF_EQUAL(spa, dva, birth, buf)) {
 689                         *lockp = hash_lock;
 690                         return (buf);
 691                 }
 692         }
 693         mutex_exit(hash_lock);
 694         *lockp = NULL;
 695         return (NULL);
 696 }
 697 
 698 /*
 699  * Insert an entry into the hash table.  If there is already an element
 700  * equal to the new entry in the hash table, then the existing element
 701  * will be returned and the new element will not be inserted.
 702  * Otherwise returns NULL.
 703  */
 704 static arc_buf_hdr_t *
 705 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
 706 {
 707         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 708         kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 709         arc_buf_hdr_t *fbuf;
 710         uint32_t i;
 711 
 712         ASSERT(!HDR_IN_HASH_TABLE(buf));
 713         *lockp = hash_lock;
 714         mutex_enter(hash_lock);
 715         for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
 716             fbuf = fbuf->b_hash_next, i++) {
 717                 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
 718                         return (fbuf);
 719         }
 720 
 721         buf->b_hash_next = buf_hash_table.ht_table[idx];
 722         buf_hash_table.ht_table[idx] = buf;
 723         buf->b_flags |= ARC_IN_HASH_TABLE;
 724 
 725         /* collect some hash table performance data */
 726         if (i > 0) {
 727                 ARCSTAT_BUMP(arcstat_hash_collisions);
 728                 if (i == 1)
 729                         ARCSTAT_BUMP(arcstat_hash_chains);
 730 
 731                 ARCSTAT_MAX(arcstat_hash_chain_max, i);
 732         }
 733 
 734         ARCSTAT_BUMP(arcstat_hash_elements);
 735         ARCSTAT_MAXSTAT(arcstat_hash_elements);
 736 
 737         return (NULL);
 738 }
 739 
 740 static void
 741 buf_hash_remove(arc_buf_hdr_t *buf)
 742 {
 743         arc_buf_hdr_t *fbuf, **bufp;
 744         uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
 745 
 746         ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
 747         ASSERT(HDR_IN_HASH_TABLE(buf));
 748 
 749         bufp = &buf_hash_table.ht_table[idx];
 750         while ((fbuf = *bufp) != buf) {
 751                 ASSERT(fbuf != NULL);
 752                 bufp = &fbuf->b_hash_next;
 753         }
 754         *bufp = buf->b_hash_next;
 755         buf->b_hash_next = NULL;
 756         buf->b_flags &= ~ARC_IN_HASH_TABLE;
 757 
 758         /* collect some hash table performance data */
 759         ARCSTAT_BUMPDOWN(arcstat_hash_elements);
 760 
 761         if (buf_hash_table.ht_table[idx] &&
 762             buf_hash_table.ht_table[idx]->b_hash_next == NULL)
 763                 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
 764 }
 765 
 766 /*
 767  * Global data structures and functions for the buf kmem cache.
 768  */
 769 static kmem_cache_t *hdr_cache;
 770 static kmem_cache_t *buf_cache;
 771 
 772 static void
 773 buf_fini(void)
 774 {
 775         int i;
 776 
 777         kmem_free(buf_hash_table.ht_table,
 778             (buf_hash_table.ht_mask + 1) * sizeof (void *));
 779         for (i = 0; i < BUF_LOCKS; i++)
 780                 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
 781         kmem_cache_destroy(hdr_cache);
 782         kmem_cache_destroy(buf_cache);
 783 }
 784 
 785 /*
 786  * Constructor callback - called when the cache is empty
 787  * and a new buf is requested.
 788  */
 789 /* ARGSUSED */
 790 static int
 791 hdr_cons(void *vbuf, void *unused, int kmflag)
 792 {
 793         arc_buf_hdr_t *buf = vbuf;
 794 
 795         bzero(buf, sizeof (arc_buf_hdr_t));
 796         refcount_create(&buf->b_refcnt);
 797         cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
 798         mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 799         arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 800 
 801         return (0);
 802 }
 803 
 804 /* ARGSUSED */
 805 static int
 806 buf_cons(void *vbuf, void *unused, int kmflag)
 807 {
 808         arc_buf_t *buf = vbuf;
 809 
 810         bzero(buf, sizeof (arc_buf_t));
 811         mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
 812         rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL);
 813         arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 814 
 815         return (0);
 816 }
 817 
 818 /*
 819  * Destructor callback - called when a cached buf is
 820  * no longer required.
 821  */
 822 /* ARGSUSED */
 823 static void
 824 hdr_dest(void *vbuf, void *unused)
 825 {
 826         arc_buf_hdr_t *buf = vbuf;
 827 
 828         ASSERT(BUF_EMPTY(buf));
 829         refcount_destroy(&buf->b_refcnt);
 830         cv_destroy(&buf->b_cv);
 831         mutex_destroy(&buf->b_freeze_lock);
 832         arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 833 }
 834 
 835 /* ARGSUSED */
 836 static void
 837 buf_dest(void *vbuf, void *unused)
 838 {
 839         arc_buf_t *buf = vbuf;
 840 
 841         mutex_destroy(&buf->b_evict_lock);
 842         rw_destroy(&buf->b_data_lock);
 843         arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
 844 }
 845 
 846 /*
 847  * Reclaim callback -- invoked when memory is low.
 848  */
 849 /* ARGSUSED */
 850 static void
 851 hdr_recl(void *unused)
 852 {
 853         dprintf("hdr_recl called\n");
 854         /*
 855          * umem calls the reclaim func when we destroy the buf cache,
 856          * which is after we do arc_fini().
 857          */
 858         if (!arc_dead)
 859                 cv_signal(&arc_reclaim_thr_cv);
 860 }
 861 
 862 static void
 863 buf_init(void)
 864 {
 865         uint64_t *ct;
 866         uint64_t hsize = 1ULL << 12;
 867         int i, j;
 868 
 869         /*
 870          * The hash table is big enough to fill all of physical memory
 871          * with an average 64K block size.  The table will take up
 872          * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
 873          */
 874         while (hsize * 65536 < physmem * PAGESIZE)
 875                 hsize <<= 1;
 876 retry:
 877         buf_hash_table.ht_mask = hsize - 1;
 878         buf_hash_table.ht_table =
 879             kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
 880         if (buf_hash_table.ht_table == NULL) {
 881                 ASSERT(hsize > (1ULL << 8));
 882                 hsize >>= 1;
 883                 goto retry;
 884         }
 885 
 886         hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
 887             0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
 888         buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
 889             0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
 890 
 891         for (i = 0; i < 256; i++)
 892                 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
 893                         *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
 894 
 895         for (i = 0; i < BUF_LOCKS; i++) {
 896                 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
 897                     NULL, MUTEX_DEFAULT, NULL);
 898         }
 899 }
 900 
 901 #define ARC_MINTIME     (hz>>4) /* 62 ms */
 902 
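/*
 * Checksum debugging (only active with ZFS_DEBUG_MODIFY): while a buffer
 * is "frozen", a fletcher-2 checksum of its contents is kept in
 * b_freeze_cksum, and arc_cksum_verify() panics if the data has changed.
 * arc_buf_thaw() discards the checksum so the buffer may be modified
 * again.
 */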
 903 static void
 904 arc_cksum_verify(arc_buf_t *buf)
 905 {
 906         zio_cksum_t zc;
 907 
 908         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
 909                 return;
 910 
 911         mutex_enter(&buf->b_hdr->b_freeze_lock);
 912         if (buf->b_hdr->b_freeze_cksum == NULL ||
 913             (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
 914                 mutex_exit(&buf->b_hdr->b_freeze_lock);
 915                 return;
 916         }
 917         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
 918         if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
 919                 panic("buffer modified while frozen!");
 920         mutex_exit(&buf->b_hdr->b_freeze_lock);
 921 }
 922 
 923 static int
 924 arc_cksum_equal(arc_buf_t *buf)
 925 {
 926         zio_cksum_t zc;
 927         int equal;
 928 
 929         mutex_enter(&buf->b_hdr->b_freeze_lock);
 930         fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
 931         equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
 932         mutex_exit(&buf->b_hdr->b_freeze_lock);
 933 
 934         return (equal);
 935 }
 936 
 937 static void
 938 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
 939 {
 940         if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
 941                 return;
 942 
 943         mutex_enter(&buf->b_hdr->b_freeze_lock);
 944         if (buf->b_hdr->b_freeze_cksum != NULL) {
 945                 mutex_exit(&buf->b_hdr->b_freeze_lock);
 946                 return;
 947         }
 948         buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
 949         fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
 950             buf->b_hdr->b_freeze_cksum);
 951         mutex_exit(&buf->b_hdr->b_freeze_lock);
 952 }
 953 
 954 void
 955 arc_buf_thaw(arc_buf_t *buf)
 956 {
 957         if (zfs_flags & ZFS_DEBUG_MODIFY) {
 958                 if (buf->b_hdr->b_state != arc_anon)
 959                         panic("modifying non-anon buffer!");
 960                 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
 961                         panic("modifying buffer while i/o in progress!");
 962                 arc_cksum_verify(buf);
 963         }
 964 
 965         mutex_enter(&buf->b_hdr->b_freeze_lock);
 966         if (buf->b_hdr->b_freeze_cksum != NULL) {
 967                 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
 968                 buf->b_hdr->b_freeze_cksum = NULL;
 969         }
 970 
 971         if (zfs_flags & ZFS_DEBUG_MODIFY) {
 972                 if (buf->b_hdr->b_thawed)
 973                         kmem_free(buf->b_hdr->b_thawed, 1);
 974                 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
 975         }
 976 
 977         mutex_exit(&buf->b_hdr->b_freeze_lock);
 978 }
 979 
 980 void
 981 arc_buf_freeze(arc_buf_t *buf)
 982 {
 983         kmutex_t *hash_lock;
 984 
 985         if (!(zfs_flags & ZFS_DEBUG_MODIFY))
 986                 return;
 987 
 988         hash_lock = HDR_LOCK(buf->b_hdr);
 989         mutex_enter(hash_lock);
 990 
 991         ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
 992             buf->b_hdr->b_state == arc_anon);
 993         arc_cksum_compute(buf, B_FALSE);
 994         mutex_exit(hash_lock);
 995 }
 996 
 997 static void
 998 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
 999 {
1000         ASSERT(MUTEX_HELD(hash_lock));
1001 
1002         if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1003             (ab->b_state != arc_anon)) {
1004                 uint64_t delta = ab->b_size * ab->b_datacnt;
1005                 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1006                 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1007 
1008                 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1009                 mutex_enter(&ab->b_state->arcs_mtx);
1010                 ASSERT(list_link_active(&ab->b_arc_node));
1011                 list_remove(list, ab);
1012                 if (GHOST_STATE(ab->b_state)) {
1013                         ASSERT3U(ab->b_datacnt, ==, 0);
1014                         ASSERT3P(ab->b_buf, ==, NULL);
1015                         delta = ab->b_size;
1016                 }
1017                 ASSERT(delta > 0);
1018                 ASSERT3U(*size, >=, delta);
1019                 atomic_add_64(size, -delta);
1020                 mutex_exit(&ab->b_state->arcs_mtx);
1021                 /* remove the prefetch flag if we get a reference */
1022                 if (ab->b_flags & ARC_PREFETCH)
1023                         ab->b_flags &= ~ARC_PREFETCH;
1024         }
1025 }
1026 
1027 static int
1028 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1029 {
1030         int cnt;
1031         arc_state_t *state = ab->b_state;
1032 
1033         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1034         ASSERT(!GHOST_STATE(state));
1035 
1036         if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1037             (state != arc_anon)) {
1038                 uint64_t *size = &state->arcs_lsize[ab->b_type];
1039 
1040                 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1041                 mutex_enter(&state->arcs_mtx);
1042                 ASSERT(!list_link_active(&ab->b_arc_node));
1043                 list_insert_head(&state->arcs_list[ab->b_type], ab);
1044                 ASSERT(ab->b_datacnt > 0);
1045                 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1046                 mutex_exit(&state->arcs_mtx);
1047         }
1048         return (cnt);
1049 }
1050 
1051 /*
1052  * Move the supplied buffer to the indicated state.  The mutex
1053  * for the buffer must be held by the caller.
1054  */
1055 static void
1056 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1057 {
1058         arc_state_t *old_state = ab->b_state;
1059         int64_t refcnt = refcount_count(&ab->b_refcnt);
1060         uint64_t from_delta, to_delta;
1061 
1062         ASSERT(MUTEX_HELD(hash_lock));
1063         ASSERT(new_state != old_state);
1064         ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1065         ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1066         ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1067 
1068         from_delta = to_delta = ab->b_datacnt * ab->b_size;
1069 
1070         /*
1071          * If this buffer is evictable, transfer it from the
1072          * old state list to the new state list.
1073          */
1074         if (refcnt == 0) {
1075                 if (old_state != arc_anon) {
1076                         int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1077                         uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1078 
1079                         if (use_mutex)
1080                                 mutex_enter(&old_state->arcs_mtx);
1081 
1082                         ASSERT(list_link_active(&ab->b_arc_node));
1083                         list_remove(&old_state->arcs_list[ab->b_type], ab);
1084 
1085                         /*
1086                          * If prefetching out of the ghost cache,
1087                          * we will have a non-zero datacnt.
1088                          */
1089                         if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1090                                 /* ghost elements have a ghost size */
1091                                 ASSERT(ab->b_buf == NULL);
1092                                 from_delta = ab->b_size;
1093                         }
1094                         ASSERT3U(*size, >=, from_delta);
1095                         atomic_add_64(size, -from_delta);
1096 
1097                         if (use_mutex)
1098                                 mutex_exit(&old_state->arcs_mtx);
1099                 }
1100                 if (new_state != arc_anon) {
1101                         int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1102                         uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1103 
1104                         if (use_mutex)
1105                                 mutex_enter(&new_state->arcs_mtx);
1106 
1107                         list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1108 
1109                         /* ghost elements have a ghost size */
1110                         if (GHOST_STATE(new_state)) {
1111                                 ASSERT(ab->b_datacnt == 0);
1112                                 ASSERT(ab->b_buf == NULL);
1113                                 to_delta = ab->b_size;
1114                         }
1115                         atomic_add_64(size, to_delta);
1116 
1117                         if (use_mutex)
1118                                 mutex_exit(&new_state->arcs_mtx);
1119                 }
1120         }
1121 
1122         ASSERT(!BUF_EMPTY(ab));
1123         if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1124                 buf_hash_remove(ab);
1125 
1126         /* adjust state sizes */
1127         if (to_delta)
1128                 atomic_add_64(&new_state->arcs_size, to_delta);
1129         if (from_delta) {
1130                 ASSERT3U(old_state->arcs_size, >=, from_delta);
1131                 atomic_add_64(&old_state->arcs_size, -from_delta);
1132         }
1133         ab->b_state = new_state;
1134 
1135         /* adjust l2arc hdr stats */
1136         if (new_state == arc_l2c_only)
1137                 l2arc_hdr_stat_add();
1138         else if (old_state == arc_l2c_only)
1139                 l2arc_hdr_stat_remove();
1140 }
1141 
1142 void
1143 arc_space_consume(uint64_t space, arc_space_type_t type)
1144 {
1145         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1146 
1147         switch (type) {
1148         case ARC_SPACE_DATA:
1149                 ARCSTAT_INCR(arcstat_data_size, space);
1150                 break;
1151         case ARC_SPACE_OTHER:
1152                 ARCSTAT_INCR(arcstat_other_size, space);
1153                 break;
1154         case ARC_SPACE_HDRS:
1155                 ARCSTAT_INCR(arcstat_hdr_size, space);
1156                 break;
1157         case ARC_SPACE_L2HDRS:
1158                 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1159                 break;
1160         }
1161 
1162         atomic_add_64(&arc_meta_used, space);
1163         atomic_add_64(&arc_size, space);
1164 }
1165 
1166 void
1167 arc_space_return(uint64_t space, arc_space_type_t type)
1168 {
1169         ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1170 
1171         switch (type) {
1172         case ARC_SPACE_DATA:
1173                 ARCSTAT_INCR(arcstat_data_size, -space);
1174                 break;
1175         case ARC_SPACE_OTHER:
1176                 ARCSTAT_INCR(arcstat_other_size, -space);
1177                 break;
1178         case ARC_SPACE_HDRS:
1179                 ARCSTAT_INCR(arcstat_hdr_size, -space);
1180                 break;
1181         case ARC_SPACE_L2HDRS:
1182                 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1183                 break;
1184         }
1185 
1186         ASSERT(arc_meta_used >= space);
1187         if (arc_meta_max < arc_meta_used)
1188                 arc_meta_max = arc_meta_used;
1189         atomic_add_64(&arc_meta_used, -space);
1190         ASSERT(arc_size >= space);
1191         atomic_add_64(&arc_size, -space);
1192 }
1193 
1194 void *
1195 arc_data_buf_alloc(uint64_t size)
1196 {
1197         if (arc_evict_needed(ARC_BUFC_DATA))
1198                 cv_signal(&arc_reclaim_thr_cv);
1199         atomic_add_64(&arc_size, size);
1200         return (zio_data_buf_alloc(size));
1201 }
1202 
1203 void
1204 arc_data_buf_free(void *buf, uint64_t size)
1205 {
1206         zio_data_buf_free(buf, size);
1207         ASSERT(arc_size >= size);
1208         atomic_add_64(&arc_size, -size);
1209 }
1210 
1211 arc_buf_t *
1212 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1213 {
1214         arc_buf_hdr_t *hdr;
1215         arc_buf_t *buf;
1216 
1217         ASSERT3U(size, >, 0);
1218         hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1219         ASSERT(BUF_EMPTY(hdr));
1220         hdr->b_size = size;
1221         hdr->b_type = type;
1222         hdr->b_spa = spa_load_guid(spa);
1223         hdr->b_state = arc_anon;
1224         hdr->b_arc_access = 0;
1225         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1226         buf->b_hdr = hdr;
1227         buf->b_data = NULL;
1228         buf->b_efunc = NULL;
1229         buf->b_private = NULL;
1230         buf->b_next = NULL;
1231         hdr->b_buf = buf;
1232         arc_get_data_buf(buf);
1233         hdr->b_datacnt = 1;
1234         hdr->b_flags = 0;
1235         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1236         (void) refcount_add(&hdr->b_refcnt, tag);
1237 
1238         return (buf);
1239 }
1240 
1241 static char *arc_onloan_tag = "onloan";
1242 
1243 /*
1244  * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1245  * flight data by arc_tempreserve_space() until they are "returned". Loaned
1246  * buffers must be returned to the arc before they can be used by the DMU or
1247  * freed.
1248  */
1249 arc_buf_t *
1250 arc_loan_buf(spa_t *spa, int size)
1251 {
1252         arc_buf_t *buf;
1253 
1254         buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1255 
1256         atomic_add_64(&arc_loaned_bytes, size);
1257         return (buf);
1258 }
1259 
1260 /*
1261  * Return a loaned arc buffer to the arc.
1262  */
1263 void
1264 arc_return_buf(arc_buf_t *buf, void *tag)
1265 {
1266         arc_buf_hdr_t *hdr = buf->b_hdr;
1267 
1268         ASSERT(buf->b_data != NULL);
1269         (void) refcount_add(&hdr->b_refcnt, tag);
1270         (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1271 
1272         atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1273 }
1274 
1275 /* Detach an arc_buf from a dbuf (tag) */
1276 void
1277 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1278 {
1279         arc_buf_hdr_t *hdr;
1280 
1281         ASSERT(buf->b_data != NULL);
1282         hdr = buf->b_hdr;
1283         (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1284         (void) refcount_remove(&hdr->b_refcnt, tag);
1285         buf->b_efunc = NULL;
1286         buf->b_private = NULL;
1287 
1288         atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1289 }
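
/*
 * A minimal usage sketch of the loan interface above (hypothetical
 * caller, not code from this file):
 *
 *      arc_buf_t *buf = arc_loan_buf(spa, size);    // tagged "onloan"
 *      // ... fill in buf->b_data ...
 *      arc_return_buf(buf, tag);    // hand it back; re-tagged and no
 *                                   // longer counted in arc_loaned_bytes
 *
 * arc_loan_inuse_buf() goes the other way, detaching a buffer from its
 * tag and marking it as loaned again.
 */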
1290 
1291 static arc_buf_t *
1292 arc_buf_clone(arc_buf_t *from)
1293 {
1294         arc_buf_t *buf;
1295         arc_buf_hdr_t *hdr = from->b_hdr;
1296         uint64_t size = hdr->b_size;
1297 
1298         ASSERT(hdr->b_state != arc_anon);
1299 
1300         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1301         buf->b_hdr = hdr;
1302         buf->b_data = NULL;
1303         buf->b_efunc = NULL;
1304         buf->b_private = NULL;
1305         buf->b_next = hdr->b_buf;
1306         hdr->b_buf = buf;
1307         arc_get_data_buf(buf);
1308         bcopy(from->b_data, buf->b_data, size);
1309         hdr->b_datacnt += 1;
1310         return (buf);
1311 }
1312 
1313 void
1314 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1315 {
1316         arc_buf_hdr_t *hdr;
1317         kmutex_t *hash_lock;
1318 
1319         /*
1320          * Check to see if this buffer is evicted.  Callers
1321          * must verify b_data != NULL to know if the add_ref
1322          * was successful.
1323          */
1324         mutex_enter(&buf->b_evict_lock);
1325         if (buf->b_data == NULL) {
1326                 mutex_exit(&buf->b_evict_lock);
1327                 return;
1328         }
1329         hash_lock = HDR_LOCK(buf->b_hdr);
1330         mutex_enter(hash_lock);
1331         hdr = buf->b_hdr;
1332         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1333         mutex_exit(&buf->b_evict_lock);
1334 
1335         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1336         add_reference(hdr, hash_lock, tag);
1337         DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1338         arc_access(hdr, hash_lock);
1339         mutex_exit(hash_lock);
1340         ARCSTAT_BUMP(arcstat_hits);
1341         ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1342             demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1343             data, metadata, hits);
1344 }
1345 
1346 /*
1347  * Free the arc data buffer.  If it is an l2arc write in progress,
1348  * the buffer is placed on l2arc_free_on_write to be freed later.
1349  */
1350 static void
1351 arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
1352     void *data, size_t size)
1353 {
1354         if (HDR_L2_WRITING(hdr)) {
1355                 l2arc_data_free_t *df;
1356                 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1357                 df->l2df_data = data;
1358                 df->l2df_size = size;
1359                 df->l2df_func = free_func;
1360                 mutex_enter(&l2arc_free_on_write_mtx);
1361                 list_insert_head(l2arc_free_on_write, df);
1362                 mutex_exit(&l2arc_free_on_write_mtx);
1363                 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1364         } else {
1365                 free_func(data, size);
1366         }
1367 }
1368 
1369 static void
1370 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1371 {
1372         arc_buf_t **bufp;
1373 
1374         /* free up data associated with the buf */
1375         if (buf->b_data) {
1376                 arc_state_t *state = buf->b_hdr->b_state;
1377                 uint64_t size = buf->b_hdr->b_size;
1378                 arc_buf_contents_t type = buf->b_hdr->b_type;
1379 
1380                 arc_cksum_verify(buf);
1381 
1382                 if (!recycle) {
1383                         if (type == ARC_BUFC_METADATA) {
1384                                 arc_buf_data_free(buf->b_hdr, zio_buf_free,
1385                                     buf->b_data, size);
1386                                 arc_space_return(size, ARC_SPACE_DATA);
1387                         } else {
1388                                 ASSERT(type == ARC_BUFC_DATA);
1389                                 arc_buf_data_free(buf->b_hdr,
1390                                     zio_data_buf_free, buf->b_data, size);
1391                                 ARCSTAT_INCR(arcstat_data_size, -size);
1392                                 atomic_add_64(&arc_size, -size);
1393                         }
1394                 }
1395                 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1396                         uint64_t *cnt = &state->arcs_lsize[type];
1397 
1398                         ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1399                         ASSERT(state != arc_anon);
1400 
1401                         ASSERT3U(*cnt, >=, size);
1402                         atomic_add_64(cnt, -size);
1403                 }
1404                 ASSERT3U(state->arcs_size, >=, size);
1405                 atomic_add_64(&state->arcs_size, -size);
1406                 buf->b_data = NULL;
1407                 ASSERT(buf->b_hdr->b_datacnt > 0);
1408                 buf->b_hdr->b_datacnt -= 1;
1409         }
1410 
1411         /* only remove the buf if requested */
1412         if (!all)
1413                 return;
1414 
1415         /* remove the buf from the hdr list */
1416         for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1417                 continue;
1418         *bufp = buf->b_next;
1419         buf->b_next = NULL;
1420 
1421         ASSERT(buf->b_efunc == NULL);
1422 
1423         /* clean up the buf */
1424         buf->b_hdr = NULL;
1425         kmem_cache_free(buf_cache, buf);
1426 }
1427 
1428 static void
1429 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1430 {
1431         l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1432 
1433         ASSERT(refcount_is_zero(&hdr->b_refcnt));
1434         ASSERT3P(hdr->b_state, ==, arc_anon);
1435         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
     
1436         if (l2hdr != NULL) {
1437                 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1438                 /*
1439                  * To prevent arc_free() and l2arc_evict() from
1440                  * attempting to free the same buffer at the same time,
1441                  * a FREE_IN_PROGRESS flag is given to arc_free() to
1442                  * give it priority.  l2arc_evict() can't destroy this
1443                  * header while we are waiting on l2arc_buflist_mtx.
1444                  *
1445                  * The hdr may be removed from l2ad_buflist before we
1446                  * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1447                  */
1448                 if (!buflist_held) {
1449                         mutex_enter(&l2arc_buflist_mtx);
1450                         l2hdr = hdr->b_l2hdr;
1451                 }
1452 
1453                 if (l2hdr != NULL) {
1454                         list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1455                         ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1456                         kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1457                         if (hdr->b_state == arc_l2c_only)
1458                                 l2arc_hdr_stat_remove();
1459                         hdr->b_l2hdr = NULL;
1460                 }
1461 
1462                 if (!buflist_held)
1463                         mutex_exit(&l2arc_buflist_mtx);
1464         }
1465 
1466         if (!BUF_EMPTY(hdr)) {
1467                 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1468                 buf_discard_identity(hdr);
1469         }
1470         while (hdr->b_buf) {
1471                 arc_buf_t *buf = hdr->b_buf;
1472 
1473                 if (buf->b_efunc) {
1474                         mutex_enter(&arc_eviction_mtx);
1475                         mutex_enter(&buf->b_evict_lock);
1476                         ASSERT(buf->b_hdr != NULL);
1477                         arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1478                         hdr->b_buf = buf->b_next;
1479                         buf->b_hdr = &arc_eviction_hdr;
1480                         buf->b_next = arc_eviction_list;
1481                         arc_eviction_list = buf;
1482                         mutex_exit(&buf->b_evict_lock);
1483                         mutex_exit(&arc_eviction_mtx);
1484                 } else {
1485                         arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1486                 }
1487         }
1488         if (hdr->b_freeze_cksum != NULL) {
1489                 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1490                 hdr->b_freeze_cksum = NULL;
1491         }
1492         if (hdr->b_thawed) {
1493                 kmem_free(hdr->b_thawed, 1);
1494                 hdr->b_thawed = NULL;
1495         }
1496 
1497         ASSERT(!list_link_active(&hdr->b_arc_node));
1498         ASSERT3P(hdr->b_hash_next, ==, NULL);
1499         ASSERT3P(hdr->b_acb, ==, NULL);
1500         kmem_cache_free(hdr_cache, hdr);
1501 }
1502 
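     /*
      * Release a buffer that has no eviction callback.  For anonymous
      * headers this may destroy the header itself; for cached (hashed)
      * headers the data is either freed or left marked ARC_BUF_AVAILABLE.
      */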
1503 void
1504 arc_buf_free(arc_buf_t *buf, void *tag)
1505 {
1506         arc_buf_hdr_t *hdr = buf->b_hdr;
1507         int hashed = hdr->b_state != arc_anon;
1508 
1509         ASSERT(buf->b_efunc == NULL);
1510         ASSERT(buf->b_data != NULL);
1511 
1512         if (hashed) {
1513                 kmutex_t *hash_lock = HDR_LOCK(hdr);
1514 
1515                 mutex_enter(hash_lock);
1516                 hdr = buf->b_hdr;
1517                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1518 
1519                 (void) remove_reference(hdr, hash_lock, tag);
1520                 if (hdr->b_datacnt > 1) {
1521                         arc_buf_destroy(buf, FALSE, TRUE);
1522                 } else {
1523                         ASSERT(buf == hdr->b_buf);
1524                         ASSERT(buf->b_efunc == NULL);
1525                         hdr->b_flags |= ARC_BUF_AVAILABLE;
1526                 }
1527                 mutex_exit(hash_lock);
1528         } else if (HDR_IO_IN_PROGRESS(hdr)) {
1529                 int destroy_hdr;
1530                 /*
1531                  * We are in the middle of an async write.  Don't destroy
1532                  * this buffer unless the write completes before we finish
1533                  * decrementing the reference count.
1534                  */
1535                 mutex_enter(&arc_eviction_mtx);
1536                 (void) remove_reference(hdr, NULL, tag);
1537                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1538                 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1539                 mutex_exit(&arc_eviction_mtx);
1540                 if (destroy_hdr)
1541                         arc_hdr_destroy(hdr);
1542         } else {
1543                 if (remove_reference(hdr, NULL, tag) > 0)
1544                         arc_buf_destroy(buf, FALSE, TRUE);
1545                 else
1546                         arc_hdr_destroy(hdr);
1547         }
1548 }
1549 
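     /*
      * Drop the reference held by `tag'.  Buffers without an eviction
      * callback are freed (or marked ARC_BUF_AVAILABLE); the return value
      * is nonzero exactly when no callback was registered.
      */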
1550 int
1551 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1552 {
1553         arc_buf_hdr_t *hdr = buf->b_hdr;
1554         kmutex_t *hash_lock = HDR_LOCK(hdr);
1555         int no_callback = (buf->b_efunc == NULL);
1556 
1557         if (hdr->b_state == arc_anon) {
1558                 ASSERT(hdr->b_datacnt == 1);
1559                 arc_buf_free(buf, tag);
1560                 return (no_callback);
1561         }
1562 
1563         mutex_enter(hash_lock);
1564         hdr = buf->b_hdr;
1565         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1566         ASSERT(hdr->b_state != arc_anon);
1567         ASSERT(buf->b_data != NULL);
1568 
1569         (void) remove_reference(hdr, hash_lock, tag);
1570         if (hdr->b_datacnt > 1) {
1571                 if (no_callback)
1572                         arc_buf_destroy(buf, FALSE, TRUE);
1573         } else if (no_callback) {
1574                 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1575                 ASSERT(buf->b_efunc == NULL);
1576                 hdr->b_flags |= ARC_BUF_AVAILABLE;
1577         }
1578         ASSERT(no_callback || hdr->b_datacnt > 1 ||
1579             refcount_is_zero(&hdr->b_refcnt));
1580         mutex_exit(hash_lock);
1581         return (no_callback);
1582 }
1583 
1584 int
1585 arc_buf_size(arc_buf_t *buf)
1586 {
1587         return (buf->b_hdr->b_size);
1588 }
1589 
1590 /*
1591  * Evict buffers from list until we've removed the specified number of
1592  * bytes.  Move the removed buffers to the appropriate evict state.
1593  * If the recycle flag is set, then attempt to "recycle" a buffer:
1594  * - look for a buffer to evict that is `bytes' long.
1595  * - return the data block from this buffer rather than freeing it.
1596  * This flag is used by callers that are trying to make space for a
1597  * new buffer in a full arc cache.
1598  *
1599  * This function makes a "best effort".  It skips over any buffers
1600  * it can't get a hash_lock on, and so may not catch all candidates.
1601  * It may also return without evicting as much space as requested.
1602  */
1603 static void *
1604 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1605     arc_buf_contents_t type)
1606 {
1607         arc_state_t *evicted_state;
1608         uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1609         arc_buf_hdr_t *ab, *ab_prev = NULL;
1610         list_t *list = &state->arcs_list[type];
1611         kmutex_t *hash_lock;
1612         boolean_t have_lock;
1613         void *stolen = NULL;
1614 
1615         ASSERT(state == arc_mru || state == arc_mfu);
1616 
1617         evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1618 
1619         mutex_enter(&state->arcs_mtx);
1620         mutex_enter(&evicted_state->arcs_mtx);
1621 
1622         for (ab = list_tail(list); ab; ab = ab_prev) {
1623                 ab_prev = list_prev(list, ab);
1624                 /* prefetch buffers have a minimum lifespan */
1625                 if (HDR_IO_IN_PROGRESS(ab) ||
1626                     (spa && ab->b_spa != spa) ||
1627                     (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1628                     ddi_get_lbolt() - ab->b_arc_access <
1629                     arc_min_prefetch_lifespan)) {
1630                         skipped++;
1631                         continue;
1632                 }
1633                 /* "lookahead" for better eviction candidate */
1634                 if (recycle && ab->b_size != bytes &&
1635                     ab_prev && ab_prev->b_size == bytes)
1636                         continue;
1637                 hash_lock = HDR_LOCK(ab);
1638                 have_lock = MUTEX_HELD(hash_lock);
1639                 if (have_lock || mutex_tryenter(hash_lock)) {
1640                         ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
1641                         ASSERT(ab->b_datacnt > 0);
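                             /*
                              * Destroy every buffer attached to this header,
                              * handing buffers with eviction callbacks to
                              * arc_eviction_list and "stealing" one data
                              * block for the recycle case.
                              */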
1642                         while (ab->b_buf) {
1643                                 arc_buf_t *buf = ab->b_buf;
1644                                 if (!mutex_tryenter(&buf->b_evict_lock)) {
1645                                         missed += 1;
1646                                         break;
1647                                 }
1648                                 if (buf->b_data) {
1649                                         bytes_evicted += ab->b_size;
1650                                         if (recycle && ab->b_type == type &&
1651                                             ab->b_size == bytes &&
1652                                             !HDR_L2_WRITING(ab)) {
1653                                                 stolen = buf->b_data;
1654                                                 recycle = FALSE;
1655                                         }
1656                                 }
1657                                 if (buf->b_efunc) {
1658                                         mutex_enter(&arc_eviction_mtx);
1659                                         arc_buf_destroy(buf,
1660                                             buf->b_data == stolen, FALSE);
1661                                         ab->b_buf = buf->b_next;
1662                                         buf->b_hdr = &arc_eviction_hdr;
1663                                         buf->b_next = arc_eviction_list;
1664                                         arc_eviction_list = buf;
1665                                         mutex_exit(&arc_eviction_mtx);
1666                                         mutex_exit(&buf->b_evict_lock);
1667                                 } else {
1668                                         mutex_exit(&buf->b_evict_lock);
1669                                         arc_buf_destroy(buf,
1670                                             buf->b_data == stolen, TRUE);
1671                                 }
1672                         }
1673 
1674                         if (ab->b_l2hdr) {
1675                                 ARCSTAT_INCR(arcstat_evict_l2_cached,
1676                                     ab->b_size);
1677                         } else {
1678                                 if (l2arc_write_eligible(ab->b_spa, ab)) {
1679                                         ARCSTAT_INCR(arcstat_evict_l2_eligible,
1680                                             ab->b_size);
1681                                 } else {
1682                                         ARCSTAT_INCR(
1683                                             arcstat_evict_l2_ineligible,
1684                                             ab->b_size);
1685                                 }
1686                         }
1687 
1688                         if (ab->b_datacnt == 0) {
1689                                 arc_change_state(evicted_state, ab, hash_lock);
1690                                 ASSERT(HDR_IN_HASH_TABLE(ab));
1691                                 ab->b_flags |= ARC_IN_HASH_TABLE;
1692                                 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1693                                 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1694                         }
1695                         if (!have_lock)
1696                                 mutex_exit(hash_lock);
1697                         if (bytes >= 0 && bytes_evicted >= bytes)
1698                                 break;
1699                 } else {
1700                         missed += 1;
1701                 }
1702         }
1703 
1704         mutex_exit(&evicted_state->arcs_mtx);
1705         mutex_exit(&state->arcs_mtx);
1706 
1707         if (bytes_evicted < bytes)
1708                 dprintf("only evicted %lld bytes from %p",
1709                     (longlong_t)bytes_evicted, state);
1710 
1711         if (skipped)
1712                 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1713 
1714         if (missed)
1715                 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1716 
1717         /*
1718          * We have just evicted some data into the ghost state; make
1719          * sure we also adjust the ghost state size if necessary.
1720          */
1721         if (arc_no_grow &&
1722             arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
1723                 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
1724                     arc_mru_ghost->arcs_size - arc_c;
1725 
1726                 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
1727                         int64_t todelete =
1728                             MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
1729                         arc_evict_ghost(arc_mru_ghost, NULL, todelete);
1730                 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
1731                         int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
1732                             arc_mru_ghost->arcs_size +
1733                             arc_mfu_ghost->arcs_size - arc_c);
1734                         arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
1735                 }
1736         }
1737 
1738         return (stolen);
1739 }
1740 
1741 /*
1742  * Remove buffers from list until we've removed the specified number of
1743  * bytes.  Destroy the buffers that are removed.
1744  */
1745 static void
1746 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1747 {
1748         arc_buf_hdr_t *ab, *ab_prev;
1749         arc_buf_hdr_t marker = { 0 };
1750         list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1751         kmutex_t *hash_lock;
1752         uint64_t bytes_deleted = 0;
1753         uint64_t bufs_skipped = 0;
1754 
1755         ASSERT(GHOST_STATE(state));
1756 top:
1757         mutex_enter(&state->arcs_mtx);
1758         for (ab = list_tail(list); ab; ab = ab_prev) {
1759                 ab_prev = list_prev(list, ab);
1760                 if (spa && ab->b_spa != spa)
1761                         continue;
1762 
1763                 /* ignore markers */
1764                 if (ab->b_spa == 0)
1765                         continue;
1766 
1767                 hash_lock = HDR_LOCK(ab);
1768                 /* caller may be trying to modify this buffer, skip it */
1769                 if (MUTEX_HELD(hash_lock))
1770                         continue;
1771                 if (mutex_tryenter(hash_lock)) {
1772                         ASSERT(!HDR_IO_IN_PROGRESS(ab));
1773                         ASSERT(ab->b_buf == NULL);
1774                         ARCSTAT_BUMP(arcstat_deleted);
1775                         bytes_deleted += ab->b_size;
1776 
1777                         if (ab->b_l2hdr != NULL) {
1778                                 /*
1779                                  * This buffer is cached on the 2nd Level ARC;
1780                                  * don't destroy the header.
1781                                  */
1782                                 arc_change_state(arc_l2c_only, ab, hash_lock);
1783                                 mutex_exit(hash_lock);
1784                         } else {
1785                                 arc_change_state(arc_anon, ab, hash_lock);
1786                                 mutex_exit(hash_lock);
1787                                 arc_hdr_destroy(ab);
1788                         }
1789 
1790                         DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1791                         if (bytes >= 0 && bytes_deleted >= bytes)
1792                                 break;
1793                 } else if (bytes < 0) {
1794                         /*
1795                          * Insert a list marker and then wait for the
1796                          * hash lock to become available.  Once it's
1797                          * available, restart from where we left off.
1798                          */
1799                         list_insert_after(list, ab, &marker);
1800                         mutex_exit(&state->arcs_mtx);
1801                         mutex_enter(hash_lock);
1802                         mutex_exit(hash_lock);
1803                         mutex_enter(&state->arcs_mtx);
1804                         ab_prev = list_prev(list, &marker);
1805                         list_remove(list, &marker);
1806                 } else
1807                         bufs_skipped += 1;
1808         }
1809         mutex_exit(&state->arcs_mtx);
1810 
1811         if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1812             (bytes < 0 || bytes_deleted < bytes)) {
1813                 list = &state->arcs_list[ARC_BUFC_METADATA];
1814                 goto top;
1815         }
1816 
1817         if (bufs_skipped) {
1818                 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1819                 ASSERT(bytes >= 0);
1820         }
1821 
1822         if (bytes_deleted < bytes)
1823                 dprintf("only deleted %lld bytes from %p",
1824                     (longlong_t)bytes_deleted, state);
1825 }
1826 
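     /*
      * Evict data from the MRU and MFU lists (and their ghost lists) to
      * bring the cache back toward its arc_c and arc_p targets.
      */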
1827 static void
1828 arc_adjust(void)
1829 {
1830         int64_t adjustment, delta;
1831 
1832         /*
1833          * Adjust MRU size
1834          */
1835 
1836         adjustment = MIN((int64_t)(arc_size - arc_c),
1837             (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
1838             arc_p));
1839 
1840         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
1841                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
1842                 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
1843                 adjustment -= delta;
1844         }
1845 
1846         if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1847                 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
1848                 (void) arc_evict(arc_mru, NULL, delta, FALSE,
1849                     ARC_BUFC_METADATA);
1850         }
1851 
1852         /*
1853          * Adjust MFU size
1854          */
1855 
1856         adjustment = arc_size - arc_c;
1857 
1858         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
1859                 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
1860                 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
1861                 adjustment -= delta;
1862         }
1863 
1864         if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1865                 int64_t delta = MIN(adjustment,
1866                     arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
1867                 (void) arc_evict(arc_mfu, NULL, delta, FALSE,
1868                     ARC_BUFC_METADATA);
1869         }
1870 
1871         /*
1872          * Adjust ghost lists
1873          */
1874 
1875         adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
1876 
1877         if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
1878                 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
1879                 arc_evict_ghost(arc_mru_ghost, NULL, delta);
1880         }
1881 
1882         adjustment =
1883             arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
1884 
1885         if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
1886                 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
1887                 arc_evict_ghost(arc_mfu_ghost, NULL, delta);
1888         }
1889 }
1890 
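     /*
      * Invoke the eviction callback (b_efunc) for every buffer on
      * arc_eviction_list and free the now-detached buffers.
      */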
1891 static void
1892 arc_do_user_evicts(void)
1893 {
1894         mutex_enter(&arc_eviction_mtx);
1895         while (arc_eviction_list != NULL) {
1896                 arc_buf_t *buf = arc_eviction_list;
1897                 arc_eviction_list = buf->b_next;
1898                 mutex_enter(&buf->b_evict_lock);
1899                 buf->b_hdr = NULL;
1900                 mutex_exit(&buf->b_evict_lock);
1901                 mutex_exit(&arc_eviction_mtx);
1902 
1903                 if (buf->b_efunc != NULL)
1904                         VERIFY(buf->b_efunc(buf) == 0);
1905 
1906                 buf->b_efunc = NULL;
1907                 buf->b_private = NULL;
1908                 kmem_cache_free(buf_cache, buf);
1909                 mutex_enter(&arc_eviction_mtx);
1910         }
1911         mutex_exit(&arc_eviction_mtx);
1912 }
1913 
1914 /*
1915  * Flush all *evictable* data from the cache for the given spa.
1916  * NOTE: this will not touch "active" (i.e. referenced) data.
1917  */
1918 void
1919 arc_flush(spa_t *spa)
1920 {
1921         uint64_t guid = 0;
1922 
1923         if (spa)
1924                 guid = spa_load_guid(spa);
1925 
1926         while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
1927                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
1928                 if (spa)
1929                         break;
1930         }
1931         while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
1932                 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
1933                 if (spa)
1934                         break;
1935         }
1936         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
1937                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
1938                 if (spa)
1939                         break;
1940         }
1941         while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
1942                 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
1943                 if (spa)
1944                         break;
1945         }
1946 
1947         arc_evict_ghost(arc_mru_ghost, guid, -1);
1948         arc_evict_ghost(arc_mfu_ghost, guid, -1);
1949 
1950         mutex_enter(&arc_reclaim_thr_lock);
1951         arc_do_user_evicts();
1952         mutex_exit(&arc_reclaim_thr_lock);
1953         ASSERT(spa || arc_eviction_list == NULL);
1954 }
1955 
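     /*
      * Lower the cache target size (arc_c) in response to memory pressure
      * and, if the cache now exceeds the new target, call arc_adjust() to
      * evict the excess.
      */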
1956 void
1957 arc_shrink(void)
1958 {
1959         if (arc_c > arc_c_min) {
1960                 uint64_t to_free;
1961 
1962 #ifdef _KERNEL
1963                 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
1964 #else
1965                 to_free = arc_c >> arc_shrink_shift;
1966 #endif
1967                 if (arc_c > arc_c_min + to_free)
1968                         atomic_add_64(&arc_c, -to_free);
1969                 else
1970                         arc_c = arc_c_min;
1971 
1972                 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1973                 if (arc_c > arc_size)
1974                         arc_c = MAX(arc_size, arc_c_min);
1975                 if (arc_p > arc_c)
1976                         arc_p = (arc_c >> 1);
1977                 ASSERT(arc_c >= arc_c_min);
1978                 ASSERT((int64_t)arc_p >= 0);
1979         }
1980 
1981         if (arc_size > arc_c)
1982                 arc_adjust();
1983 }
1984 
1985 /*
1986  * Determine if the system is under memory pressure and is asking
1987  * to reclaim memory. A return value of 1 indicates that the system
1988  * is under memory pressure and that the arc should adjust accordingly.
1989  */
1990 static int
1991 arc_reclaim_needed(void)
1992 {
1993         uint64_t extra;
1994 
1995 #ifdef _KERNEL
1996 
1997         if (needfree)
1998                 return (1);
1999 
2000         /*
2001          * take 'desfree' extra pages, so we reclaim sooner, rather than later
2002          */
2003         extra = desfree;
2004 
2005         /*
2006          * check that we're out of range of the pageout scanner.  It starts to
2007          * schedule paging if freemem is less than lotsfree plus needfree.
2008          * lotsfree is the high-water mark for pageout, and needfree is the
2009          * number of needed free pages.  We add extra pages here to make sure
2010          * the scanner doesn't start up while we're freeing memory.
2011          */
2012         if (freemem < lotsfree + needfree + extra)
2013                 return (1);
2014 
2015         /*
2016          * check to make sure that swapfs has enough space so that anon
2017          * reservations can still succeed.  anon_resvmem() checks that
2018          * availrmem is greater than swapfs_minfree plus the number of
2019          * reserved swap pages.  We also add a bit of extra here just to prevent
2020          * circumstances from getting really dire.
2021          */
2022         if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2023                 return (1);
2024 
2025 #if defined(__i386)
2026         /*
2027          * If we're on an i386 platform, it's possible that we'll exhaust the
2028          * kernel heap space before we ever run out of available physical
2029          * memory.  Most checks of the size of the heap_area compare against
2030          * tune.t_minarmem, which is the minimum available real memory that we
2031          * can have in the system.  However, this is generally fixed at 25 pages
2032          * which is so low that it's useless.  In this comparison, we seek to
2033          * calculate the total heap-size, and reclaim if more than 3/4ths of the
2034          * heap is allocated.  (Or, in the calculation, if less than 1/4th is
2035          * free)
2036          */
2037         if (vmem_size(heap_arena, VMEM_FREE) <
2038             (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2039                 return (1);
2040 #endif
2041 
2042         /*
2043          * then enforce that at least about 1/16th of the arena's allocated
2044          * size remains free.
2045          * above about 1/16th free.
2046          *
2047          * Note: The 1/16th arena free requirement was put in place
2048          * to aggressively evict memory from the arc in order to avoid
2049          * memory fragmentation issues.
2050          */
2051         if (zio_arena != NULL &&
2052             vmem_size(zio_arena, VMEM_FREE) <
2053             (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2054                 return (1);
2055 #else
2056         if (spa_get_random(100) == 0)
2057                 return (1);
2058 #endif
2059         return (0);
2060 }
2061 
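     /*
      * Reap unused memory from the kmem caches that back ARC buffers and
      * headers.  An aggressive reclaim also shrinks the cache target first.
      */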
2062 static void
2063 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2064 {
2065         size_t                  i;
2066         kmem_cache_t            *prev_cache = NULL;
2067         kmem_cache_t            *prev_data_cache = NULL;
2068         extern kmem_cache_t     *zio_buf_cache[];
2069         extern kmem_cache_t     *zio_data_buf_cache[];
2070 
2071 #ifdef _KERNEL
2072         if (arc_meta_used >= arc_meta_limit) {
2073                 /*
2074                  * We are exceeding our meta-data cache limit.
2075                  * Purge some DNLC entries to release holds on meta-data.
2076                  */
2077                 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2078         }
2079 #if defined(__i386)
2080         /*
2081          * Reclaim unused memory from all kmem caches.
2082          */
2083         kmem_reap();
2084 #endif
2085 #endif
2086 
2087         /*
2088          * An aggressive reclamation will shrink the cache size as well as
2089          * reap free buffers from the arc kmem caches.
2090          */
2091         if (strat == ARC_RECLAIM_AGGR)
2092                 arc_shrink();
2093 
2094         for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2095                 if (zio_buf_cache[i] != prev_cache) {
2096                         prev_cache = zio_buf_cache[i];
2097                         kmem_cache_reap_now(zio_buf_cache[i]);
2098                 }
2099                 if (zio_data_buf_cache[i] != prev_data_cache) {
2100                         prev_data_cache = zio_data_buf_cache[i];
2101                         kmem_cache_reap_now(zio_data_buf_cache[i]);
2102                 }
2103         }
2104         kmem_cache_reap_now(buf_cache);
2105         kmem_cache_reap_now(hdr_cache);
2106 
2107         /*
2108          * Ask the vmem arena to reclaim unused memory from its
2109          * quantum caches.
2110          */
2111         if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2112                 vmem_qcache_reap(zio_arena);
2113 }
2114 
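     /*
      * Body of the ARC reclaim thread: wake up at least once a second,
      * reap the kmem caches when the system needs memory, re-balance the
      * cache with arc_adjust(), and run any pending user evictions.
      */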
2115 static void
2116 arc_reclaim_thread(void)
2117 {
2118         clock_t                 growtime = 0;
2119         arc_reclaim_strategy_t  last_reclaim = ARC_RECLAIM_CONS;
2120         callb_cpr_t             cpr;
2121 
2122         CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2123 
2124         mutex_enter(&arc_reclaim_thr_lock);
2125         while (arc_thread_exit == 0) {
2126                 if (arc_reclaim_needed()) {
2127 
2128                         if (arc_no_grow) {
2129                                 if (last_reclaim == ARC_RECLAIM_CONS) {
2130                                         last_reclaim = ARC_RECLAIM_AGGR;
2131                                 } else {
2132                                         last_reclaim = ARC_RECLAIM_CONS;
2133                                 }
2134                         } else {
2135                                 arc_no_grow = TRUE;
2136                                 last_reclaim = ARC_RECLAIM_AGGR;
2137                                 membar_producer();
2138                         }
2139 
2140                         /* reset the growth delay for every reclaim */
2141                         growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2142 
2143                         arc_kmem_reap_now(last_reclaim);
2144                         arc_warm = B_TRUE;
2145 
2146                 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2147                         arc_no_grow = FALSE;
2148                 }
2149 
2150                 arc_adjust();
2151 
2152                 if (arc_eviction_list != NULL)
2153                         arc_do_user_evicts();
2154 
2155                 /* block until needed, or one second, whichever is shorter */
2156                 CALLB_CPR_SAFE_BEGIN(&cpr);
2157                 (void) cv_timedwait(&arc_reclaim_thr_cv,
2158                     &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2159                 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2160         }
2161 
2162         arc_thread_exit = 0;
2163         cv_broadcast(&arc_reclaim_thr_cv);
2164         CALLB_CPR_EXIT(&cpr);               /* drops arc_reclaim_thr_lock */
2165         thread_exit();
2166 }
2167 
2168 /*
2169  * Adapt arc info given the number of bytes we are trying to add and
2170  * the state that we are coming from.  This function is only called
2171  * when we are adding new content to the cache.
2172  */
2173 static void
2174 arc_adapt(int bytes, arc_state_t *state)
2175 {
2176         int mult;
2177         uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2178 
2179         if (state == arc_l2c_only)
2180                 return;
2181 
2182         ASSERT(bytes > 0);
2183         /*
2184          * Adapt the target size of the MRU list:
2185          *      - if we just hit in the MRU ghost list, then increase
2186          *        the target size of the MRU list.
2187          *      - if we just hit in the MFU ghost list, then increase
2188          *        the target size of the MFU list by decreasing the
2189          *        target size of the MRU list.
2190          */
2191         if (state == arc_mru_ghost) {
2192                 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2193                     1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2194                 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2195 
2196                 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2197         } else if (state == arc_mfu_ghost) {
2198                 uint64_t delta;
2199 
2200                 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2201                     1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2202                 mult = MIN(mult, 10);
2203 
2204                 delta = MIN(bytes * mult, arc_p);
2205                 arc_p = MAX(arc_p_min, arc_p - delta);
2206         }
2207         ASSERT((int64_t)arc_p >= 0);
2208 
2209         if (arc_reclaim_needed()) {
2210                 cv_signal(&arc_reclaim_thr_cv);
2211                 return;
2212         }
2213 
2214         if (arc_no_grow)
2215                 return;
2216 
2217         if (arc_c >= arc_c_max)
2218                 return;
2219 
2220         /*
2221          * If we're within (2 * maxblocksize) bytes of the target
2222          * cache size, increase the target cache size.
2223          */
2224         if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2225                 atomic_add_64(&arc_c, (int64_t)bytes);
2226                 if (arc_c > arc_c_max)
2227                         arc_c = arc_c_max;
2228                 else if (state == arc_anon)
2229                         atomic_add_64(&arc_p, (int64_t)bytes);
2230                 if (arc_p > arc_c)
2231                         arc_p = arc_c;
2232         }
2233         ASSERT((int64_t)arc_p >= 0);
2234 }
2235 
2236 /*
2237  * Check if the cache has reached its limits and eviction is required
2238  * prior to insert.
2239  */
2240 static int
2241 arc_evict_needed(arc_buf_contents_t type)
2242 {
2243         if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2244                 return (1);
2245 
2246         if (arc_reclaim_needed())
2247                 return (1);
2248 
2249         return (arc_size > arc_c);
2250 }
2251 
2252 /*
2253  * The buffer, supplied as the first argument, needs a data block.
2254  * So, if we are at cache max, determine which cache should be victimized.
2255  * We have the following cases:
2256  *
2257  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2258  * In this situation if we're out of space, but the resident size of the MFU is
2259  * under the limit, victimize the MFU cache to satisfy this insertion request.
2260  *
2261  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2262  * Here, we've used up all of the available space for the MRU, so we need to
2263  * evict from our own cache instead.  Evict from the set of resident MRU
2264  * entries.
2265  *
2266  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2267  * c minus p represents the MFU space in the cache, since p is the size of the
2268  * cache that is dedicated to the MRU.  In this situation there's still space on
2269  * the MFU side, so the MRU side needs to be victimized.
2270  *
2271  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2272  * MFU's resident set is consuming more space than it has been allotted.  In
2273  * this situation, we must victimize our own cache, the MFU, for this insertion.
2274  */
2275 static void
2276 arc_get_data_buf(arc_buf_t *buf)
2277 {
2278         arc_state_t             *state = buf->b_hdr->b_state;
2279         uint64_t                size = buf->b_hdr->b_size;
2280         arc_buf_contents_t      type = buf->b_hdr->b_type;
2281 
2282         arc_adapt(size, state);
2283 
2284         /*
2285          * We have not yet reached the cache's maximum size,
2286          * so just allocate a new buffer.
2287          */
2288         if (!arc_evict_needed(type)) {
2289                 if (type == ARC_BUFC_METADATA) {
2290                         buf->b_data = zio_buf_alloc(size);
2291                         arc_space_consume(size, ARC_SPACE_DATA);
2292                 } else {
2293                         ASSERT(type == ARC_BUFC_DATA);
2294                         buf->b_data = zio_data_buf_alloc(size);
2295                         ARCSTAT_INCR(arcstat_data_size, size);
2296                         atomic_add_64(&arc_size, size);
2297                 }
2298                 goto out;
2299         }
2300 
2301         /*
2302          * If we are prefetching from the mfu ghost list, this buffer
2303          * will end up on the mru list, so steal space from there.
2304          */
2305         if (state == arc_mfu_ghost)
2306                 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2307         else if (state == arc_mru_ghost)
2308                 state = arc_mru;
2309 
2310         if (state == arc_mru || state == arc_anon) {
2311                 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2312                 state = (arc_mfu->arcs_lsize[type] >= size &&
2313                     arc_p > mru_used) ? arc_mfu : arc_mru;
2314         } else {
2315                 /* MFU cases */
2316                 uint64_t mfu_space = arc_c - arc_p;
2317                 state = (arc_mru->arcs_lsize[type] >= size &&
2318                     mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2319         }
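             /*
              * Try to "recycle" an evictable data block of the right size
              * from the chosen state; fall back to a fresh allocation if
              * arc_evict() could not steal one.
              */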
2320         if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2321                 if (type == ARC_BUFC_METADATA) {
2322                         buf->b_data = zio_buf_alloc(size);
2323                         arc_space_consume(size, ARC_SPACE_DATA);
2324                 } else {
2325                         ASSERT(type == ARC_BUFC_DATA);
2326                         buf->b_data = zio_data_buf_alloc(size);
2327                         ARCSTAT_INCR(arcstat_data_size, size);
2328                         atomic_add_64(&arc_size, size);
2329                 }
2330                 ARCSTAT_BUMP(arcstat_recycle_miss);
2331         }
2332         ASSERT(buf->b_data != NULL);
2333 out:
2334         /*
2335          * Update the state size.  Note that ghost states have a
2336          * "ghost size" and so don't need to be updated.
2337          */
2338         if (!GHOST_STATE(buf->b_hdr->b_state)) {
2339                 arc_buf_hdr_t *hdr = buf->b_hdr;
2340 
2341                 atomic_add_64(&hdr->b_state->arcs_size, size);
2342                 if (list_link_active(&hdr->b_arc_node)) {
2343                         ASSERT(refcount_is_zero(&hdr->b_refcnt));
2344                         atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2345                 }
2346                 /*
2347                  * If we are growing the cache, and we are adding anonymous
2348                  * data, and we have outgrown arc_p, update arc_p
2349                  */
2350                 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2351                     arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2352                         arc_p = MIN(arc_c, arc_p + size);
2353         }
2354 }
2355 
2356 /*
2357  * This routine is called whenever a buffer is accessed.
2358  * NOTE: the hash lock is dropped in this function.
2359  */
2360 static void
2361 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2362 {
2363         clock_t now;
2364 
2365         ASSERT(MUTEX_HELD(hash_lock));
2366 
2367         if (buf->b_state == arc_anon) {
2368                 /*
2369                  * This buffer is not in the cache, and does not
2370                  * appear in our "ghost" list.  Add the new buffer
2371                  * to the MRU state.
2372                  */
2373 
2374                 ASSERT(buf->b_arc_access == 0);
2375                 buf->b_arc_access = ddi_get_lbolt();
2376                 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2377                 arc_change_state(arc_mru, buf, hash_lock);
2378 
2379         } else if (buf->b_state == arc_mru) {
2380                 now = ddi_get_lbolt();
2381 
2382                 /*
2383                  * If this buffer is here because of a prefetch, then either:
2384                  * - clear the flag if this is a "referencing" read
2385                  *   (any subsequent access will bump this into the MFU state).
2386                  * or
2387                  * - move the buffer to the head of the list if this is
2388                  *   another prefetch (to make it less likely to be evicted).
2389                  */
2390                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2391                         if (refcount_count(&buf->b_refcnt) == 0) {
2392                                 ASSERT(list_link_active(&buf->b_arc_node));
2393                         } else {
2394                                 buf->b_flags &= ~ARC_PREFETCH;
2395                                 ARCSTAT_BUMP(arcstat_mru_hits);
2396                         }
2397                         buf->b_arc_access = now;
2398                         return;
2399                 }
2400 
2401                 /*
2402                  * This buffer has been "accessed" only once so far,
2403                  * but it is still in the cache.  If enough time has
2404                  * passed, move it to the MFU state.
2405                  */
2406                 if (now > buf->b_arc_access + ARC_MINTIME) {
2407                         /*
2408                          * More than ARC_MINTIME has elapsed since we
2409                          * instantiated this buffer.  Move it to the
2410                          * most frequently used state.
2411                          */
2412                         buf->b_arc_access = now;
2413                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2414                         arc_change_state(arc_mfu, buf, hash_lock);
2415                 }
2416                 ARCSTAT_BUMP(arcstat_mru_hits);
2417         } else if (buf->b_state == arc_mru_ghost) {
2418                 arc_state_t     *new_state;
2419                 /*
2420                  * This buffer has been "accessed" recently, but
2421                  * was evicted from the cache.  Move it to the
2422                  * MFU state.
2423                  */
2424 
2425                 if (buf->b_flags & ARC_PREFETCH) {
2426                         new_state = arc_mru;
2427                         if (refcount_count(&buf->b_refcnt) > 0)
2428                                 buf->b_flags &= ~ARC_PREFETCH;
2429                         DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2430                 } else {
2431                         new_state = arc_mfu;
2432                         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2433                 }
2434 
2435                 buf->b_arc_access = ddi_get_lbolt();
2436                 arc_change_state(new_state, buf, hash_lock);
2437 
2438                 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2439         } else if (buf->b_state == arc_mfu) {
2440                 /*
2441                  * This buffer has been accessed more than once and is
2442                  * still in the cache.  Keep it in the MFU state.
2443                  *
2444                  * NOTE: an add_reference() that occurred when we did
2445                  * the arc_read() will have kicked this off the list.
2446                  * If it was a prefetch, we will explicitly move it to
2447                  * the head of the list now.
2448                  */
2449                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2450                         ASSERT(refcount_count(&buf->b_refcnt) == 0);
2451                         ASSERT(list_link_active(&buf->b_arc_node));
2452                 }
2453                 ARCSTAT_BUMP(arcstat_mfu_hits);
2454                 buf->b_arc_access = ddi_get_lbolt();
2455         } else if (buf->b_state == arc_mfu_ghost) {
2456                 arc_state_t     *new_state = arc_mfu;
2457                 /*
2458                  * This buffer has been accessed more than once but has
2459                  * been evicted from the cache.  Move it back to the
2460                  * MFU state.
2461                  */
2462 
2463                 if (buf->b_flags & ARC_PREFETCH) {
2464                         /*
2465                          * This is a prefetch access...
2466                          * move this block back to the MRU state.
2467                          */
2468                         ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2469                         new_state = arc_mru;
2470                 }
2471 
2472                 buf->b_arc_access = ddi_get_lbolt();
2473                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2474                 arc_change_state(new_state, buf, hash_lock);
2475 
2476                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2477         } else if (buf->b_state == arc_l2c_only) {
2478                 /*
2479                  * This buffer is on the 2nd Level ARC.
2480                  */
2481 
2482                 buf->b_arc_access = ddi_get_lbolt();
2483                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2484                 arc_change_state(arc_mfu, buf, hash_lock);
2485         } else {
2486                 ASSERT(!"invalid arc state");
2487         }
2488 }
2489 
2490 /* a generic arc_done_func_t which you can use */
2491 /* ARGSUSED */
2492 void
2493 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2494 {
2495         if (zio == NULL || zio->io_error == 0)
2496                 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2497         VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2498 }
2499 
2500 /* a generic arc_done_func_t */
2501 void
2502 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2503 {
2504         arc_buf_t **bufp = arg;
2505         if (zio && zio->io_error) {
2506                 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2507                 *bufp = NULL;
2508         } else {
2509                 *bufp = buf;
2510                 ASSERT(buf->b_data);
2511         }
2512 }
2513 
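     /*
      * I/O completion callback for ARC reads: byteswap the data if
      * necessary, hand a buffer (cloning it as needed) to each waiting
      * callback, and update the header's cache state.
      */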
2514 static void
2515 arc_read_done(zio_t *zio)
2516 {
2517         arc_buf_hdr_t   *hdr, *found;
2518         arc_buf_t       *buf;
2519         arc_buf_t       *abuf;  /* buffer we're assigning to callback */
2520         kmutex_t        *hash_lock;
2521         arc_callback_t  *callback_list, *acb;
2522         int             freeable = FALSE;
2523 
2524         buf = zio->io_private;
2525         hdr = buf->b_hdr;
2526 
2527         /*
2528          * The hdr was inserted into the hash table and removed from lists
2529          * prior to starting I/O.  We should find this header, since
2530          * it's in the hash table, and it should be legit since it's
2531          * not possible to evict it during the I/O.  The only possible
2532          * reason for it not to be found is if we were freed during the
2533          * read.
2534          */
2535         found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2536             &hash_lock);
2537 
2538         ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2539             (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2540             (found == hdr && HDR_L2_READING(hdr)));
2541 
2542         hdr->b_flags &= ~ARC_L2_EVICTED;
2543         if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2544                 hdr->b_flags &= ~ARC_L2CACHE;
2545 
2546         /* byteswap if necessary */
2547         callback_list = hdr->b_acb;
2548         ASSERT(callback_list != NULL);
2549         if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2550                 dmu_object_byteswap_t bswap =
2551                     DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2552                 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2553                     byteswap_uint64_array :
2554                     dmu_ot_byteswap[bswap].ob_func;
2555                 func(buf->b_data, hdr->b_size);
2556         }
2557 
2558         arc_cksum_compute(buf, B_FALSE);
2559 
2560         if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2561                 /*
2562                  * Only call arc_access on anonymous buffers.  This is because
2563                  * if we've issued an I/O for an evicted buffer, we've already
2564                  * called arc_access (to prevent any simultaneous readers from
2565                  * getting confused).
2566                  */
2567                 arc_access(hdr, hash_lock);
2568         }
2569 
2570         /* create copies of the data buffer for the callers */
2571         abuf = buf;
2572         for (acb = callback_list; acb; acb = acb->acb_next) {
2573                 if (acb->acb_done) {
2574                         if (abuf == NULL)
2575                                 abuf = arc_buf_clone(buf);
2576                         acb->acb_buf = abuf;
2577                         abuf = NULL;
2578                 }
2579         }
2580         hdr->b_acb = NULL;
2581         hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2582         ASSERT(!HDR_BUF_AVAILABLE(hdr));
2583         if (abuf == buf) {
2584                 ASSERT(buf->b_efunc == NULL);
2585                 ASSERT(hdr->b_datacnt == 1);
2586                 hdr->b_flags |= ARC_BUF_AVAILABLE;
2587         }
2588 
2589         ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2590 
2591         if (zio->io_error != 0) {
2592                 hdr->b_flags |= ARC_IO_ERROR;
2593                 if (hdr->b_state != arc_anon)
2594                         arc_change_state(arc_anon, hdr, hash_lock);
2595                 if (HDR_IN_HASH_TABLE(hdr))
2596                         buf_hash_remove(hdr);
2597                 freeable = refcount_is_zero(&hdr->b_refcnt);
2598         }
2599 
2600         /*
2601          * Broadcast before we drop the hash_lock to avoid the possibility
2602          * that the hdr (and hence the cv) might be freed before we get to
2603          * the cv_broadcast().
2604          */
2605         cv_broadcast(&hdr->b_cv);
2606 
2607         if (hash_lock) {
2608                 mutex_exit(hash_lock);
2609         } else {
2610                 /*
2611                  * This block was freed while we waited for the read to
2612                  * complete.  It has been removed from the hash table and
2613                  * moved to the anonymous state (so that it won't show up
2614                  * in the cache).
2615                  */
2616                 ASSERT3P(hdr->b_state, ==, arc_anon);
2617                 freeable = refcount_is_zero(&hdr->b_refcnt);
2618         }
2619 
2620         /* execute each callback and free its structure */
2621         while ((acb = callback_list) != NULL) {
2622                 if (acb->acb_done)
2623                         acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2624 
2625                 if (acb->acb_zio_dummy != NULL) {
2626                         acb->acb_zio_dummy->io_error = zio->io_error;
2627                         zio_nowait(acb->acb_zio_dummy);
2628                 }
2629 
2630                 callback_list = acb->acb_next;
2631                 kmem_free(acb, sizeof (arc_callback_t));
2632         }
2633 
2634         if (freeable)
2635                 arc_hdr_destroy(hdr);
2636 }
2637 
2638 /*
2639  * "Read" the block at the specified DVA (in bp) via the
2640  * cache.  If the block is found in the cache, invoke the provided
2641  * callback immediately and return.  Note that the `zio' parameter
2642  * in the callback will be NULL in this case, since no IO was
2643  * required.  If the block is not in the cache pass the read request
2644  * on to the spa with a substitute callback function, so that the
2645  * requested block will be added to the cache.
2646  *
2647  * If a read request arrives for a block that has a read in-progress,
2648  * either wait for the in-progress read to complete (and return the
2649  * results); or, if this is a read with a "done" func, add a record
2650  * to the read to invoke the "done" func when the read completes,
2651  * and return; or just return.
2652  *
2653  * arc_read_done() will invoke all the requested "done" functions
2654  * for readers of this block.
2655  *
2656  * Normal callers should use arc_read and pass the arc buffer and offset
2657  * for the bp.  But if you know you don't need locking, you can use
2658  * arc_read_nolock.
2659  */
2660 int
2661 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf,
2662     arc_done_func_t *done, void *private, int priority, int zio_flags,
2663     uint32_t *arc_flags, const zbookmark_t *zb)
2664 {
2665         int err;
2666 
2667         if (pbuf == NULL) {
2668                 /*
2669                  * XXX This happens from traverse callback funcs, for
2670                  * the objset_phys_t block.
2671                  */
2672                 return (arc_read_nolock(pio, spa, bp, done, private, priority,
2673                     zio_flags, arc_flags, zb));
2674         }
2675 
2676         ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
2677         ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
2678         rw_enter(&pbuf->b_data_lock, RW_READER);
2679 
2680         err = arc_read_nolock(pio, spa, bp, done, private, priority,
2681             zio_flags, arc_flags, zb);
2682         rw_exit(&pbuf->b_data_lock);
2683 
2684         return (err);
2685 }
2686 
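     /*
      * The workhorse behind arc_read(): look the block up in the hash
      * table and either satisfy the request from the cache or pass it on
      * to the spa as a new read.  No parent-buffer lock is taken here.
      */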
2687 int
2688 arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp,
2689     arc_done_func_t *done, void *private, int priority, int zio_flags,
2690     uint32_t *arc_flags, const zbookmark_t *zb)
2691 {
2692         arc_buf_hdr_t *hdr;
2693         arc_buf_t *buf;
2694         kmutex_t *hash_lock;
2695         zio_t *rzio;
2696         uint64_t guid = spa_load_guid(spa);
2697 
2698 top:
2699         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2700             &hash_lock);
2701         if (hdr && hdr->b_datacnt > 0) {
2702 
2703                 *arc_flags |= ARC_CACHED;
2704 
2705                 if (HDR_IO_IN_PROGRESS(hdr)) {
2706 
2707                         if (*arc_flags & ARC_WAIT) {
2708                                 cv_wait(&hdr->b_cv, hash_lock);
2709                                 mutex_exit(hash_lock);
2710                                 goto top;
2711                         }
2712                         ASSERT(*arc_flags & ARC_NOWAIT);
2713 
2714                         if (done) {
2715                                 arc_callback_t  *acb = NULL;
2716 
2717                                 acb = kmem_zalloc(sizeof (arc_callback_t),
2718                                     KM_SLEEP);
2719                                 acb->acb_done = done;
2720                                 acb->acb_private = private;
2721                                 if (pio != NULL)
2722                                         acb->acb_zio_dummy = zio_null(pio,
2723                                             spa, NULL, NULL, NULL, zio_flags);
2724 
2725                                 ASSERT(acb->acb_done != NULL);
2726                                 acb->acb_next = hdr->b_acb;
2727                                 hdr->b_acb = acb;
2728                                 add_reference(hdr, hash_lock, private);
2729                                 mutex_exit(hash_lock);
2730                                 return (0);
2731                         }
2732                         mutex_exit(hash_lock);
2733                         return (0);
2734                 }
2735 
2736                 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2737 
2738                 if (done) {
2739                         add_reference(hdr, hash_lock, private);
2740                         /*
2741                          * If this block is already in use, create a new
2742                          * copy of the data so that we will be guaranteed
2743                          * that arc_release() will always succeed.
2744                          */
2745                         buf = hdr->b_buf;
2746                         ASSERT(buf);
2747                         ASSERT(buf->b_data);
2748                         if (HDR_BUF_AVAILABLE(hdr)) {
2749                                 ASSERT(buf->b_efunc == NULL);
2750                                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2751                         } else {
2752                                 buf = arc_buf_clone(buf);
2753                         }
2754 
2755                 } else if (*arc_flags & ARC_PREFETCH &&
2756                     refcount_count(&hdr->b_refcnt) == 0) {
2757                         hdr->b_flags |= ARC_PREFETCH;
2758                 }
2759                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2760                 arc_access(hdr, hash_lock);
2761                 if (*arc_flags & ARC_L2CACHE)
2762                         hdr->b_flags |= ARC_L2CACHE;
2763                 mutex_exit(hash_lock);
2764                 ARCSTAT_BUMP(arcstat_hits);
2765                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2766                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2767                     data, metadata, hits);
2768 
2769                 if (done)
2770                         done(NULL, buf, private);
2771         } else {
2772                 uint64_t size = BP_GET_LSIZE(bp);
2773                 arc_callback_t  *acb;
2774                 vdev_t *vd = NULL;
2775                 uint64_t addr;
2776                 boolean_t devw = B_FALSE;
2777 
2778                 if (hdr == NULL) {
2779                         /* this block is not in the cache */
2780                         arc_buf_hdr_t   *exists;
2781                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2782                         buf = arc_buf_alloc(spa, size, private, type);
2783                         hdr = buf->b_hdr;
2784                         hdr->b_dva = *BP_IDENTITY(bp);
2785                         hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2786                         hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2787                         exists = buf_hash_insert(hdr, &hash_lock);
2788                         if (exists) {
2789                                 /* somebody beat us to the hash insert */
2790                                 mutex_exit(hash_lock);
2791                                 buf_discard_identity(hdr);
2792                                 (void) arc_buf_remove_ref(buf, private);
2793                                 goto top; /* restart the IO request */
2794                         }
2795                         /* if this is a prefetch, we don't have a reference */
2796                         if (*arc_flags & ARC_PREFETCH) {
2797                                 (void) remove_reference(hdr, hash_lock,
2798                                     private);
2799                                 hdr->b_flags |= ARC_PREFETCH;
2800                         }
2801                         if (*arc_flags & ARC_L2CACHE)
2802                                 hdr->b_flags |= ARC_L2CACHE;
2803                         if (BP_GET_LEVEL(bp) > 0)
2804                                 hdr->b_flags |= ARC_INDIRECT;
2805                 } else {
2806                         /* this block is in the ghost cache */
2807                         ASSERT(GHOST_STATE(hdr->b_state));
2808                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2809                         ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2810                         ASSERT(hdr->b_buf == NULL);
2811 
2812                         /* if this is a prefetch, we don't have a reference */
2813                         if (*arc_flags & ARC_PREFETCH)
2814                                 hdr->b_flags |= ARC_PREFETCH;
2815                         else
2816                                 add_reference(hdr, hash_lock, private);
2817                         if (*arc_flags & ARC_L2CACHE)
2818                                 hdr->b_flags |= ARC_L2CACHE;
2819                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2820                         buf->b_hdr = hdr;
2821                         buf->b_data = NULL;
2822                         buf->b_efunc = NULL;
2823                         buf->b_private = NULL;
2824                         buf->b_next = NULL;
2825                         hdr->b_buf = buf;
2826                         ASSERT(hdr->b_datacnt == 0);
2827                         hdr->b_datacnt = 1;
2828                         arc_get_data_buf(buf);
2829                         arc_access(hdr, hash_lock);
2830                 }
2831 
2832                 ASSERT(!GHOST_STATE(hdr->b_state));
2833 
2834                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2835                 acb->acb_done = done;
2836                 acb->acb_private = private;
2837 
2838                 ASSERT(hdr->b_acb == NULL);
2839                 hdr->b_acb = acb;
2840                 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2841 
2842                 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2843                     (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2844                         devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2845                         addr = hdr->b_l2hdr->b_daddr;
2846                         /*
2847                          * Lock out device removal.
2848                          */
2849                         if (vdev_is_dead(vd) ||
2850                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2851                                 vd = NULL;
2852                 }
2853 
2854                 mutex_exit(hash_lock);
2855 
2856                 ASSERT3U(hdr->b_size, ==, size);
2857                 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
2858                     uint64_t, size, zbookmark_t *, zb);
2859                 ARCSTAT_BUMP(arcstat_misses);
2860                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2861                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2862                     data, metadata, misses);
2863 
2864                 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
2865                         /*
2866                          * Read from the L2ARC if the following are true:
2867                          * 1. This buffer was previously cached to an L2ARC vdev.
2868                          * 2. This buffer still has L2ARC metadata.
2869                          * 3. This buffer isn't currently being written to the L2ARC.
2870                          * 4. The L2ARC entry wasn't evicted, which may
2871                          *    also have invalidated the vdev.
2872                          * 5. This isn't a prefetch with l2arc_noprefetch set.
2873                          */
2874                         if (hdr->b_l2hdr != NULL &&
2875                             !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
2876                             !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
2877                                 l2arc_read_callback_t *cb;
2878 
2879                                 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2880                                 ARCSTAT_BUMP(arcstat_l2_hits);
2881 
2882                                 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2883                                     KM_SLEEP);
2884                                 cb->l2rcb_buf = buf;
2885                                 cb->l2rcb_spa = spa;
2886                                 cb->l2rcb_bp = *bp;
2887                                 cb->l2rcb_zb = *zb;
2888                                 cb->l2rcb_flags = zio_flags;
2889 
2890                                 /*
2891                                  * l2arc read.  The SCL_L2ARC lock will be
2892                                  * released by l2arc_read_done().
2893                                  */
2894                                 rzio = zio_read_phys(pio, vd, addr, size,
2895                                     buf->b_data, ZIO_CHECKSUM_OFF,
2896                                     l2arc_read_done, cb, priority, zio_flags |
2897                                     ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
2898                                     ZIO_FLAG_DONT_PROPAGATE |
2899                                     ZIO_FLAG_DONT_RETRY, B_FALSE);
2900                                 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2901                                     zio_t *, rzio);
2902                                 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
2903 
2904                                 if (*arc_flags & ARC_NOWAIT) {
2905                                         zio_nowait(rzio);
2906                                         return (0);
2907                                 }
2908 
2909                                 ASSERT(*arc_flags & ARC_WAIT);
2910                                 if (zio_wait(rzio) == 0)
2911                                         return (0);
2912 
2913                                 /* l2arc read error; fall back to zio_read() below */
2914                         } else {
2915                                 DTRACE_PROBE1(l2arc__miss,
2916                                     arc_buf_hdr_t *, hdr);
2917                                 ARCSTAT_BUMP(arcstat_l2_misses);
2918                                 if (HDR_L2_WRITING(hdr))
2919                                         ARCSTAT_BUMP(arcstat_l2_rw_clash);
2920                                 spa_config_exit(spa, SCL_L2ARC, vd);
2921                         }
2922                 } else {
2923                         if (vd != NULL)
2924                                 spa_config_exit(spa, SCL_L2ARC, vd);
2925                         if (l2arc_ndev != 0) {
2926                                 DTRACE_PROBE1(l2arc__miss,
2927                                     arc_buf_hdr_t *, hdr);
2928                                 ARCSTAT_BUMP(arcstat_l2_misses);
2929                         }
2930                 }
2931 
2932                 rzio = zio_read(pio, spa, bp, buf->b_data, size,
2933                     arc_read_done, buf, priority, zio_flags, zb);
2934 
2935                 if (*arc_flags & ARC_WAIT)
2936                         return (zio_wait(rzio));
2937 
2938                 ASSERT(*arc_flags & ARC_NOWAIT);
2939                 zio_nowait(rzio);
2940         }
2941         return (0);
2942 }
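
/*
 * Illustrative sketch (compiled out): a synchronous cached read issued
 * through arc_read(), per the comment above arc_read().  The "done"
 * callback must tolerate a NULL zio (cache hit) and is responsible for
 * dropping the hold that the read places on the buffer.
 * example_read_done(), example_cached_read() and the 'tag' argument are
 * hypothetical; arc_read(), arc_buf_remove_ref(), ARC_WAIT,
 * ZIO_PRIORITY_SYNC_READ and ZIO_FLAG_CANFAIL are existing interfaces.
 */
#if 0
static void
example_read_done(zio_t *zio, arc_buf_t *buf, void *private)
{
	/* zio is NULL on a cache hit; otherwise check the I/O result */
	if (zio == NULL || zio->io_error == 0) {
		/* consume buf->b_data here */
	}

	/* drop the hold placed on the buffer for this read */
	(void) arc_buf_remove_ref(buf, private);
}

static int
example_cached_read(spa_t *spa, const blkptr_t *bp, const zbookmark_t *zb,
    void *tag)
{
	uint32_t aflags = ARC_WAIT;

	/*
	 * With ARC_WAIT this does not return until the block is in the
	 * cache (or the read has failed); example_read_done() has already
	 * run by then.  Passing a NULL pbuf routes the request through
	 * arc_read_nolock(), as the traverse code does.
	 */
	return (arc_read(NULL, spa, bp, NULL, example_read_done, tag,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb));
}
#endif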
2943 
2944 void
2945 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2946 {
2947         ASSERT(buf->b_hdr != NULL);
2948         ASSERT(buf->b_hdr->b_state != arc_anon);
2949         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2950         ASSERT(buf->b_efunc == NULL);
2951         ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
2952 
2953         buf->b_efunc = func;
2954         buf->b_private = private;
2955 }
2956 
2957 /*
2958  * This is used by the DMU to let the ARC know that a buffer is
2959  * being evicted, so the ARC should clean up.  If this arc buf
2960  * is not yet in the evicted state, it will be put there.
2961  */
2962 int
2963 arc_buf_evict(arc_buf_t *buf)
2964 {
2965         arc_buf_hdr_t *hdr;
2966         kmutex_t *hash_lock;
2967         arc_buf_t **bufp;
2968 
2969         mutex_enter(&buf->b_evict_lock);
2970         hdr = buf->b_hdr;
2971         if (hdr == NULL) {
2972                 /*
2973                  * We are in arc_do_user_evicts().
2974                  */
2975                 ASSERT(buf->b_data == NULL);
2976                 mutex_exit(&buf->b_evict_lock);
2977                 return (0);
2978         } else if (buf->b_data == NULL) {
2979                 arc_buf_t copy = *buf; /* structure assignment */
2980                 /*
2981                  * We are on the eviction list; process this buffer now
2982                  * but let arc_do_user_evicts() do the reaping.
2983                  */
2984                 buf->b_efunc = NULL;
2985                 mutex_exit(&buf->b_evict_lock);
2986                 VERIFY(copy.b_efunc(&copy) == 0);
2987                 return (1);
2988         }
2989         hash_lock = HDR_LOCK(hdr);
2990         mutex_enter(hash_lock);
2991         hdr = buf->b_hdr;
2992         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2993 
2994         ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2995         ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2996 
2997         /*
2998          * Pull this buffer off of the hdr
2999          */
3000         bufp = &hdr->b_buf;
3001         while (*bufp != buf)
3002                 bufp = &(*bufp)->b_next;
3003         *bufp = buf->b_next;
3004 
3005         ASSERT(buf->b_data != NULL);
3006         arc_buf_destroy(buf, FALSE, FALSE);
3007 
3008         if (hdr->b_datacnt == 0) {
3009                 arc_state_t *old_state = hdr->b_state;
3010                 arc_state_t *evicted_state;
3011 
3012                 ASSERT(hdr->b_buf == NULL);
3013                 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3014 
3015                 evicted_state =
3016                     (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3017 
3018                 mutex_enter(&old_state->arcs_mtx);
3019                 mutex_enter(&evicted_state->arcs_mtx);
3020 
3021                 arc_change_state(evicted_state, hdr, hash_lock);
3022                 ASSERT(HDR_IN_HASH_TABLE(hdr));
3023                 hdr->b_flags |= ARC_IN_HASH_TABLE;
3024                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3025 
3026                 mutex_exit(&evicted_state->arcs_mtx);
3027                 mutex_exit(&old_state->arcs_mtx);
3028         }
3029         mutex_exit(hash_lock);
3030         mutex_exit(&buf->b_evict_lock);
3031 
3032         VERIFY(buf->b_efunc(buf) == 0);
3033         buf->b_efunc = NULL;
3034         buf->b_private = NULL;
3035         buf->b_hdr = NULL;
3036         buf->b_next = NULL;
3037         kmem_cache_free(buf_cache, buf);
3038         return (1);
3039 }
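
/*
 * Illustrative sketch (compiled out): registering an eviction callback
 * with arc_set_callback() so that arc_buf_evict() above can notify the
 * buffer's consumer, the way the DMU does for its dbufs.  As the call
 * sites in this file suggest, the callback receives the arc_buf_t and
 * returns 0 on success.  example_owner_t, example_evict_func() and
 * example_register_evict() are hypothetical.
 */
#if 0
typedef struct example_owner {
	arc_buf_t	*eo_buf;	/* cached buffer this owner references */
} example_owner_t;

static int
example_evict_func(void *arg)
{
	arc_buf_t *buf = arg;
	example_owner_t *eo = buf->b_private;	/* set by arc_set_callback() */

	/* forget the reference to the buffer's data before it goes away */
	eo->eo_buf = NULL;
	return (0);
}

static void
example_register_evict(arc_buf_t *buf, example_owner_t *eo)
{
	eo->eo_buf = buf;
	arc_set_callback(buf, example_evict_func, eo);
}
#endif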
3040 
3041 /*
3042  * Release this buffer from the cache.  This must be done
3043  * after a read and prior to modifying the buffer contents.
3044  * If the buffer has more than one reference, we must make
3045  * a new hdr for the buffer.
3046  */
3047 void
3048 arc_release(arc_buf_t *buf, void *tag)
3049 {
3050         arc_buf_hdr_t *hdr;
3051         kmutex_t *hash_lock = NULL;
3052         l2arc_buf_hdr_t *l2hdr;
3053         uint64_t buf_size;
3054 
3055         /*
3056          * It would be nice to assert that if it's DMU metadata (level >
3057          * 0 || it's the dnode file), then it must be syncing context.
3058          * But we don't know that information at this level.
3059          */
3060 
3061         mutex_enter(&buf->b_evict_lock);
3062         hdr = buf->b_hdr;
3063 
3064         /* this buffer is not on any list */
3065         ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3066 
3067         if (hdr->b_state == arc_anon) {
3068                 /* this buffer is already released */
3069                 ASSERT(buf->b_efunc == NULL);
3070         } else {
3071                 hash_lock = HDR_LOCK(hdr);
3072                 mutex_enter(hash_lock);
3073                 hdr = buf->b_hdr;
3074                 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3075         }
3076 
3077         l2hdr = hdr->b_l2hdr;
3078         if (l2hdr) {
3079                 mutex_enter(&l2arc_buflist_mtx);
3080                 hdr->b_l2hdr = NULL;
3081                 buf_size = hdr->b_size;
3082         }
3083 
3084         /*
3085          * Do we have more than one buf?
3086          */
3087         if (hdr->b_datacnt > 1) {
3088                 arc_buf_hdr_t *nhdr;
3089                 arc_buf_t **bufp;
3090                 uint64_t blksz = hdr->b_size;
3091                 uint64_t spa = hdr->b_spa;
3092                 arc_buf_contents_t type = hdr->b_type;
3093                 uint32_t flags = hdr->b_flags;
3094 
3095                 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3096                 /*
3097                  * Pull the data off of this hdr and attach it to
3098                  * a new anonymous hdr.
3099                  */
3100                 (void) remove_reference(hdr, hash_lock, tag);
3101                 bufp = &hdr->b_buf;
3102                 while (*bufp != buf)
3103                         bufp = &(*bufp)->b_next;
3104                 *bufp = buf->b_next;
3105                 buf->b_next = NULL;
3106 
3107                 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3108                 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3109                 if (refcount_is_zero(&hdr->b_refcnt)) {
3110                         uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3111                         ASSERT3U(*size, >=, hdr->b_size);
3112                         atomic_add_64(size, -hdr->b_size);
3113                 }
3114                 hdr->b_datacnt -= 1;
3115                 arc_cksum_verify(buf);
3116 
3117                 mutex_exit(hash_lock);
3118 
3119                 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3120                 nhdr->b_size = blksz;
3121                 nhdr->b_spa = spa;
3122                 nhdr->b_type = type;
3123                 nhdr->b_buf = buf;
3124                 nhdr->b_state = arc_anon;
3125                 nhdr->b_arc_access = 0;
3126                 nhdr->b_flags = flags & ARC_L2_WRITING;
3127                 nhdr->b_l2hdr = NULL;
3128                 nhdr->b_datacnt = 1;
3129                 nhdr->b_freeze_cksum = NULL;
3130                 (void) refcount_add(&nhdr->b_refcnt, tag);
3131                 buf->b_hdr = nhdr;
3132                 mutex_exit(&buf->b_evict_lock);
3133                 atomic_add_64(&arc_anon->arcs_size, blksz);
3134         } else {
3135                 mutex_exit(&buf->b_evict_lock);
3136                 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3137                 ASSERT(!list_link_active(&hdr->b_arc_node));
3138                 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3139                 if (hdr->b_state != arc_anon)
3140                         arc_change_state(arc_anon, hdr, hash_lock);
3141                 hdr->b_arc_access = 0;
3142                 if (hash_lock)
3143                         mutex_exit(hash_lock);
3144 
3145                 buf_discard_identity(hdr);
3146                 arc_buf_thaw(buf);
3147         }
3148         buf->b_efunc = NULL;
3149         buf->b_private = NULL;
3150 
3151         if (l2hdr) {
3152                 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3153                 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3154                 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3155                 mutex_exit(&l2arc_buflist_mtx);
3156         }
3157 }
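
/*
 * Illustrative sketch (compiled out): the release-before-modify pattern
 * described in the comment above arc_release().  'tag' is the hold the
 * caller already has on the buffer (e.g. the private/tag it used for
 * arc_read()); example_modify_cached_buf() is hypothetical.
 */
#if 0
static void
example_modify_cached_buf(arc_buf_t *buf, void *tag)
{
	/*
	 * Detach the buffer from any shared, cached header so that the
	 * modification cannot be observed through other references.
	 * Afterwards the buffer is anonymous and arc_released() is true.
	 */
	arc_release(buf, tag);
	ASSERT(arc_released(buf));

	/* now it is safe to scribble on the data */
	bzero(buf->b_data, buf->b_hdr->b_size);
}
#endif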
3158 
3159 /*
3160  * Release this buffer.  If it does not match the provided BP, fill it
3161  * with that block's contents.
3162  */
3163 /* ARGSUSED */
3164 int
3165 arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa,
3166     zbookmark_t *zb)
3167 {
3168         arc_release(buf, tag);
3169         return (0);
3170 }
3171 
3172 int
3173 arc_released(arc_buf_t *buf)
3174 {
3175         int released;
3176 
3177         mutex_enter(&buf->b_evict_lock);
3178         released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3179         mutex_exit(&buf->b_evict_lock);
3180         return (released);
3181 }
3182 
3183 int
3184 arc_has_callback(arc_buf_t *buf)
3185 {
3186         int callback;
3187 
3188         mutex_enter(&buf->b_evict_lock);
3189         callback = (buf->b_efunc != NULL);
3190         mutex_exit(&buf->b_evict_lock);
3191         return (callback);
3192 }
3193 
3194 #ifdef ZFS_DEBUG
3195 int
3196 arc_referenced(arc_buf_t *buf)
3197 {
3198         int referenced;
3199 
3200         mutex_enter(&buf->b_evict_lock);
3201         referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3202         mutex_exit(&buf->b_evict_lock);
3203         return (referenced);
3204 }
3205 #endif
3206 
3207 static void
3208 arc_write_ready(zio_t *zio)
3209 {
3210         arc_write_callback_t *callback = zio->io_private;
3211         arc_buf_t *buf = callback->awcb_buf;
3212         arc_buf_hdr_t *hdr = buf->b_hdr;
3213 
3214         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3215         callback->awcb_ready(zio, buf, callback->awcb_private);
3216 
3217         /*
3218          * If the IO is already in progress, then this is a re-write
3219          * attempt, so we need to thaw and re-compute the cksum.
3220          * It is the responsibility of the callback to handle the
3221          * accounting for any re-write attempt.
3222          */
3223         if (HDR_IO_IN_PROGRESS(hdr)) {
3224                 mutex_enter(&hdr->b_freeze_lock);
3225                 if (hdr->b_freeze_cksum != NULL) {
3226                         kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3227                         hdr->b_freeze_cksum = NULL;
3228                 }
3229                 mutex_exit(&hdr->b_freeze_lock);
3230         }
3231         arc_cksum_compute(buf, B_FALSE);
3232         hdr->b_flags |= ARC_IO_IN_PROGRESS;
3233 }
3234 
3235 static void
3236 arc_write_done(zio_t *zio)
3237 {
3238         arc_write_callback_t *callback = zio->io_private;
3239         arc_buf_t *buf = callback->awcb_buf;
3240         arc_buf_hdr_t *hdr = buf->b_hdr;
3241 
3242         ASSERT(hdr->b_acb == NULL);
3243 
3244         if (zio->io_error == 0) {
3245                 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3246                 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3247                 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3248         } else {
3249                 ASSERT(BUF_EMPTY(hdr));
3250         }
3251 
3252         /*
3253          * If the block to be written was all-zero, we may have
3254          * compressed it away.  In this case no write was performed
3255          * so there will be no dva/birth/checksum.  The buffer must
3256          * therefore remain anonymous (and uncached).
3257          */
3258         if (!BUF_EMPTY(hdr)) {
3259                 arc_buf_hdr_t *exists;
3260                 kmutex_t *hash_lock;
3261 
3262                 ASSERT(zio->io_error == 0);
3263 
3264                 arc_cksum_verify(buf);
3265 
3266                 exists = buf_hash_insert(hdr, &hash_lock);
3267                 if (exists) {
3268                         /*
3269                          * This can only happen if we overwrite for
3270                          * sync-to-convergence, because we remove
3271                          * buffers from the hash table when we arc_free().
3272                          */
3273                         if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3274                                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3275                                         panic("bad overwrite, hdr=%p exists=%p",
3276                                             (void *)hdr, (void *)exists);
3277                                 ASSERT(refcount_is_zero(&exists->b_refcnt));
3278                                 arc_change_state(arc_anon, exists, hash_lock);
3279                                 mutex_exit(hash_lock);
3280                                 arc_hdr_destroy(exists);
3281                                 exists = buf_hash_insert(hdr, &hash_lock);
3282                                 ASSERT3P(exists, ==, NULL);
3283                         } else {
3284                                 /* Dedup */
3285                                 ASSERT(hdr->b_datacnt == 1);
3286                                 ASSERT(hdr->b_state == arc_anon);
3287                                 ASSERT(BP_GET_DEDUP(zio->io_bp));
3288                                 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3289                         }
3290                 }
3291                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3292                 /* if it's not anon, we are doing a scrub */
3293                 if (!exists && hdr->b_state == arc_anon)
3294                         arc_access(hdr, hash_lock);
3295                 mutex_exit(hash_lock);
3296         } else {
3297                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3298         }
3299 
3300         ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3301         callback->awcb_done(zio, buf, callback->awcb_private);
3302 
3303         kmem_free(callback, sizeof (arc_write_callback_t));
3304 }
3305 
3306 zio_t *
3307 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3308     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3309     arc_done_func_t *ready, arc_done_func_t *done, void *private,
3310     int priority, int zio_flags, const zbookmark_t *zb)
3311 {
3312         arc_buf_hdr_t *hdr = buf->b_hdr;
3313         arc_write_callback_t *callback;
3314         zio_t *zio;
3315 
3316         ASSERT(ready != NULL);
3317         ASSERT(done != NULL);
3318         ASSERT(!HDR_IO_ERROR(hdr));
3319         ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3320         ASSERT(hdr->b_acb == NULL);
3321         if (l2arc)
3322                 hdr->b_flags |= ARC_L2CACHE;
3323         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3324         callback->awcb_ready = ready;
3325         callback->awcb_done = done;
3326         callback->awcb_private = private;
3327         callback->awcb_buf = buf;
3328 
3329         zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3330             arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3331 
3332         return (zio);
3333 }
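
/*
 * Illustrative sketch (compiled out): writing an anonymous (released)
 * ARC buffer through arc_write() and waiting for it.  The ready/done
 * callbacks follow the contract of arc_write_ready()/arc_write_done()
 * above.  example_write_ready(), example_write_done() and
 * example_arc_write() are hypothetical; the zio_prop_t and bookmark are
 * assumed to be prepared by the caller.
 */
#if 0
static void
example_write_ready(zio_t *zio, arc_buf_t *buf, void *private)
{
	/* data is finalized here, before the physical write is issued */
}

static void
example_write_done(zio_t *zio, arc_buf_t *buf, void *private)
{
	/* the block has been written, or was compressed away entirely */
}

static int
example_arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    arc_buf_t *buf, const zio_prop_t *zp, const zbookmark_t *zb)
{
	zio_t *zio;

	/* the buffer must be anonymous and still held by the caller */
	ASSERT(arc_released(buf));

	zio = arc_write(pio, spa, txg, bp, buf, B_FALSE /* l2arc */, zp,
	    example_write_ready, example_write_done, NULL,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, zb);

	return (zio_wait(zio));
}
#endif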
3334 
3335 static int
3336 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3337 {
3338 #ifdef _KERNEL
3339         uint64_t available_memory = ptob(freemem);
3340         static uint64_t page_load = 0;
3341         static uint64_t last_txg = 0;
3342 
3343 #if defined(__i386)
3344         available_memory =
3345             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3346 #endif
3347         if (available_memory >= zfs_write_limit_max)
3348                 return (0);
3349 
3350         if (txg > last_txg) {
3351                 last_txg = txg;
3352                 page_load = 0;
3353         }
3354         /*
3355          * If we are in pageout, we know that memory is already tight
3356          * and the ARC is already going to be evicting, so we just want
3357          * to continue to let page writes occur as quickly as possible.
3358          */
3359         if (curproc == proc_pageout) {
3360                 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3361                         return (ERESTART);
3362                 /* Note: reserve is inflated, so we deflate */
3363                 page_load += reserve / 8;
3364                 return (0);
3365         } else if (page_load > 0 && arc_reclaim_needed()) {
3366                 /* memory is low, delay before restarting */
3367                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3368                 return (EAGAIN);
3369         }
3370         page_load = 0;
3371 
3372         if (arc_size > arc_c_min) {
3373                 uint64_t evictable_memory =
3374                     arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3375                     arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3376                     arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3377                     arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3378                 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3379         }
3380 
3381         if (inflight_data > available_memory / 4) {
3382                 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3383                 return (ERESTART);
3384         }
3385 #endif
3386         return (0);
3387 }
3388 
3389 void
3390 arc_tempreserve_clear(uint64_t reserve)
3391 {
3392         atomic_add_64(&arc_tempreserve, -reserve);
3393         ASSERT((int64_t)arc_tempreserve >= 0);
3394 }
3395 
3396 int
3397 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3398 {
3399         int error;
3400         uint64_t anon_size;
3401 
3402 #ifdef ZFS_DEBUG
3403         /*
3404          * Once in a while, fail for no reason.  Everything should cope.
3405          */
3406         if (spa_get_random(10000) == 0) {
3407                 dprintf("forcing random failure\n");
3408                 return (ERESTART);
3409         }
3410 #endif
3411         if (reserve > arc_c/4 && !arc_no_grow)
3412                 arc_c = MIN(arc_c_max, reserve * 4);
3413         if (reserve > arc_c)
3414                 return (ENOMEM);
3415 
3416         /*
3417          * Don't count loaned bufs as in flight dirty data to prevent long
3418          * network delays from blocking transactions that are ready to be
3419          * assigned to a txg.
3420          */
3421         anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3422 
3423         /*
3424          * Writes will, almost always, require additional memory allocations
3425          * in order to compress/encrypt/etc the data.  We therefore need to
3426          * make sure that there is sufficient available memory for this.
3427          */
3428         if (error = arc_memory_throttle(reserve, anon_size, txg))
3429                 return (error);
3430 
3431         /*
3432          * Throttle writes when the amount of dirty data in the cache
3433          * gets too large.  We try to keep the cache less than half full
3434          * of dirty blocks so that our sync times don't grow too large.
3435          * Note: if two requests come in concurrently, we might let them
3436          * both succeed, when one of them should fail.  Not a huge deal.
3437          */
3438 
3439         if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3440             anon_size > arc_c / 4) {
3441                 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3442                     "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3443                     arc_tempreserve>>10,
3444                     arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3445                     arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3446                     reserve>>10, arc_c>>10);
3447                 return (ERESTART);
3448         }
3449         atomic_add_64(&arc_tempreserve, reserve);
3450         return (0);
3451 }
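
/*
 * Illustrative sketch (compiled out): pairing arc_tempreserve_space()
 * with arc_tempreserve_clear(), retrying when the ARC asks the caller
 * to back off.  example_reserve_dirty() is hypothetical; the DMU does
 * the equivalent when dirtying data for a transaction group.
 */
#if 0
static int
example_reserve_dirty(uint64_t nbytes, uint64_t txg)
{
	int error;

	/*
	 * ERESTART and EAGAIN mean "not now": the caller should wait
	 * (for example, for the next txg) and retry rather than fail.
	 */
	error = arc_tempreserve_space(nbytes, txg);
	if (error != 0)
		return (error);

	/* ... dirty up to nbytes of anonymous ARC data here ... */

	/* drop the reservation once the dirty data has been accounted */
	arc_tempreserve_clear(nbytes);
	return (0);
}
#endif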
3452 
3453 void
3454 arc_init(void)
3455 {
3456         mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3457         cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3458 
3459         /* Convert seconds to clock ticks */
3460         arc_min_prefetch_lifespan = 1 * hz;
3461 
3462         /* Start out with 1/8 of all memory */
3463         arc_c = physmem * PAGESIZE / 8;
3464 
3465 #ifdef _KERNEL
3466         /*
3467          * On architectures where the physical memory can be larger
3468          * than the addressable space (intel in 32-bit mode), we may
3469          * need to limit the cache to 1/8 of VM size.
3470          */
3471         arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3472 #endif
3473 
3474         /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3475         arc_c_min = MAX(arc_c / 4, 64<<20);
3476         /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3477         if (arc_c * 8 >= 1<<30)
3478                 arc_c_max = (arc_c * 8) - (1<<30);
3479         else
3480                 arc_c_max = arc_c_min;
3481         arc_c_max = MAX(arc_c * 6, arc_c_max);
3482 
3483         /*
3484          * Allow the tunables to override our calculations if they are
3485          * reasonable (i.e. over 64MB)
3486          */
3487         if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3488                 arc_c_max = zfs_arc_max;
3489         if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3490                 arc_c_min = zfs_arc_min;
3491 
3492         arc_c = arc_c_max;
3493         arc_p = (arc_c >> 1);
3494 
3495         /* limit meta-data to 1/4 of the arc capacity */
3496         arc_meta_limit = arc_c_max / 4;
3497 
3498         /* Allow the tunable to override if it is reasonable */
3499         if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3500                 arc_meta_limit = zfs_arc_meta_limit;
3501 
3502         if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3503                 arc_c_min = arc_meta_limit / 2;
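
        /*
         * Worked example of the sizing above, assuming a 64-bit machine
         * with 16 GB of physical memory and no tunables set (values are
         * illustrative only):
         *
         *	arc_c (initial)	= 16 GB / 8			=  2 GB
         *	arc_c_min	= MAX(2 GB / 4, 64 MB)		= 512 MB
         *	arc_c_max	= MAX(2 GB * 6, 16 GB - 1 GB)	= 15 GB
         *	arc_c, arc_p	= 15 GB, 7.5 GB
         *	arc_meta_limit	= 15 GB / 4			= 3.75 GB
         *	arc_c_min	= arc_meta_limit / 2		= 1.875 GB
         *
         * The last step applies because the computed 512 MB minimum falls
         * below half the metadata limit while zfs_arc_min is unset.
         */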
3504 
3505         if (zfs_arc_grow_retry > 0)
3506                 arc_grow_retry = zfs_arc_grow_retry;
3507 
3508         if (zfs_arc_shrink_shift > 0)
3509                 arc_shrink_shift = zfs_arc_shrink_shift;
3510 
3511         if (zfs_arc_p_min_shift > 0)
3512                 arc_p_min_shift = zfs_arc_p_min_shift;
3513 
3514         /* if kmem_flags are set, let's try to use less memory */
3515         if (kmem_debugging())
3516                 arc_c = arc_c / 2;
3517         if (arc_c < arc_c_min)
3518                 arc_c = arc_c_min;
3519 
3520         arc_anon = &ARC_anon;
3521         arc_mru = &ARC_mru;
3522         arc_mru_ghost = &ARC_mru_ghost;
3523         arc_mfu = &ARC_mfu;
3524         arc_mfu_ghost = &ARC_mfu_ghost;
3525         arc_l2c_only = &ARC_l2c_only;
3526         arc_size = 0;
3527 
3528         mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3529         mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3530         mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3531         mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3532         mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3533         mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3534 
3535         list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3536             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3537         list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3538             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3539         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3540             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3541         list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3542             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3543         list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3544             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3545         list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3546             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3547         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3548             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3549         list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3550             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3551         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3552             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3553         list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3554             sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3555 
3556         buf_init();
3557 
3558         arc_thread_exit = 0;
3559         arc_eviction_list = NULL;
3560         mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3561         bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3562 
3563         arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3564             sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3565 
3566         if (arc_ksp != NULL) {
3567                 arc_ksp->ks_data = &arc_stats;
3568                 kstat_install(arc_ksp);
3569         }
3570 
3571         (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3572             TS_RUN, minclsyspri);
3573 
3574         arc_dead = FALSE;
3575         arc_warm = B_FALSE;
3576 
3577         if (zfs_write_limit_max == 0)
3578                 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3579         else
3580                 zfs_write_limit_shift = 0;
3581         mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3582 }
3583 
3584 void
3585 arc_fini(void)
3586 {
3587         mutex_enter(&arc_reclaim_thr_lock);
3588         arc_thread_exit = 1;
3589         while (arc_thread_exit != 0)
3590                 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3591         mutex_exit(&arc_reclaim_thr_lock);
3592 
3593         arc_flush(NULL);
3594 
3595         arc_dead = TRUE;
3596 
3597         if (arc_ksp != NULL) {
3598                 kstat_delete(arc_ksp);
3599                 arc_ksp = NULL;
3600         }
3601 
3602         mutex_destroy(&arc_eviction_mtx);
3603         mutex_destroy(&arc_reclaim_thr_lock);
3604         cv_destroy(&arc_reclaim_thr_cv);
3605 
3606         list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3607         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3608         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3609         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3610         list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3611         list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3612         list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3613         list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3614 
3615         mutex_destroy(&arc_anon->arcs_mtx);
3616         mutex_destroy(&arc_mru->arcs_mtx);
3617         mutex_destroy(&arc_mru_ghost->arcs_mtx);
3618         mutex_destroy(&arc_mfu->arcs_mtx);
3619         mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3620         mutex_destroy(&arc_l2c_only->arcs_mtx);
3621 
3622         mutex_destroy(&zfs_write_limit_lock);
3623 
3624         buf_fini();
3625 
3626         ASSERT(arc_loaned_bytes == 0);
3627 }
3628 
3629 /*
3630  * Level 2 ARC
3631  *
3632  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3633  * It uses dedicated storage devices to hold cached data, which are populated
3634  * using large infrequent writes.  The main role of this cache is to boost
3635  * the performance of random read workloads.  The intended L2ARC devices
3636  * include short-stroked disks, solid state disks, and other media with
3637  * substantially faster read latency than disk.
3638  *
3639  *                 +-----------------------+
3640  *                 |         ARC           |
3641  *                 +-----------------------+
3642  *                    |         ^     ^
3643  *                    |         |     |
3644  *      l2arc_feed_thread()    arc_read()
3645  *                    |         |     |
3646  *                    |  l2arc read   |
3647  *                    V         |     |
3648  *               +---------------+    |
3649  *               |     L2ARC     |    |
3650  *               +---------------+    |
3651  *                   |    ^           |
3652  *          l2arc_write() |           |
3653  *                   |    |           |
3654  *                   V    |           |
3655  *                 +-------+      +-------+
3656  *                 | vdev  |      | vdev  |
3657  *                 | cache |      | cache |
3658  *                 +-------+      +-------+
3659  *                 +=========+     .-----.
3660  *                 :  L2ARC  :    |-_____-|
3661  *                 : devices :    | Disks |
3662  *                 +=========+    `-_____-'
3663  *
3664  * Read requests are satisfied from the following sources, in order:
3665  *
3666  *      1) ARC
3667  *      2) vdev cache of L2ARC devices
3668  *      3) L2ARC devices
3669  *      4) vdev cache of disks
3670  *      5) disks
3671  *
3672  * Some L2ARC device types exhibit extremely slow write performance.
3673  * To accommodate this, there are some significant differences between
3674  * the L2ARC and traditional cache design:
3675  *
3676  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
3677  * the ARC behave as usual, freeing buffers and placing headers on ghost
3678  * lists.  The ARC does not send buffers to the L2ARC during eviction as
3679  * this would add inflated write latencies for all ARC memory pressure.
3680  *
3681  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3682  * It does this by periodically scanning buffers from the eviction-end of
3683  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3684  * not already there.  It scans until a headroom of buffers is satisfied,
3685  * which itself is a buffer for ARC eviction.  The thread that does this is
3686  * l2arc_feed_thread(), illustrated below; example sizes are included to
3687  * provide a better sense of ratio than this diagram:
3688  *
3689  *             head -->                        tail
3690  *              +---------------------+----------+
3691  *      ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
3692  *              +---------------------+----------+   |   o L2ARC eligible
3693  *      ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
3694  *              +---------------------+----------+   |
3695  *                   15.9 Gbytes      ^ 32 Mbytes    |
3696  *                                 headroom          |
3697  *                                            l2arc_feed_thread()
3698  *                                                   |
3699  *                       l2arc write hand <--[oooo]--'
3700  *                               |           8 Mbyte
3701  *                               |          write max
3702  *                               V
3703  *                +==============================+
3704  *      L2ARC dev |####|#|###|###|    |####| ... |
3705  *                +==============================+
3706  *                           32 Gbytes
3707  *
3708  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3709  * evicted, then the L2ARC has cached a buffer much sooner than it probably
3710  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
3711  * safe to say that this is an uncommon case, since buffers at the end of
3712  * the ARC lists have moved there due to inactivity.
3713  *
3714  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3715  * then the L2ARC simply misses copying some buffers.  This serves as a
3716  * pressure valve to prevent heavy read workloads from both stalling the ARC
3717  * with waits and clogging the L2ARC with writes.  This also helps prevent
3718  * the potential for the L2ARC to churn if it attempts to cache content too
3719  * quickly, such as during backups of the entire pool.
3720  *
3721  * 5. After system boot and before the ARC has filled main memory, there are
3722  * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3723  * lists can remain mostly static.  Instead of searching from tail of these
3724  * lists as pictured, the l2arc_feed_thread() will search from the list heads
3725  * for eligible buffers, greatly increasing its chance of finding them.
3726  *
3727  * The L2ARC device write speed is also boosted during this time so that
3728  * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
3729  * there are no L2ARC reads, and no fear of degrading read performance
3730  * through increased writes.
3731  *
3732  * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3733  * the vdev queue can aggregate them into larger and fewer writes.  Each
3734  * device is written to in a rotor fashion, sweeping writes through
3735  * available space then repeating.
3736  *
3737  * 7. The L2ARC does not store dirty content.  It never needs to flush
3738  * write buffers back to disk based storage.
3739  *
3740  * 8. If an ARC buffer is written (and dirtied) which also exists in the
3741  * L2ARC, the now stale L2ARC buffer is immediately dropped.
3742  *
3743  * The performance of the L2ARC can be tweaked by a number of tunables, which
3744  * may be necessary for different workloads:
3745  *
3746  *      l2arc_write_max         max write bytes per interval
3747  *      l2arc_write_boost       extra write bytes during device warmup
3748  *      l2arc_noprefetch        skip caching prefetched buffers
3749  *      l2arc_headroom          number of max device writes to precache
3750  *      l2arc_feed_secs         seconds between L2ARC writing
3751  *
3752  * Tunables may be removed or added as future performance improvements are
3753  * integrated, and also may become zpool properties.
3754  *
3755  * There are three key functions that control how the L2ARC warms up:
3756  *
3757  *      l2arc_write_eligible()  check if a buffer is eligible to cache
3758  *      l2arc_write_size()      calculate how much to write
3759  *      l2arc_write_interval()  calculate sleep delay between writes
3760  *
3761  * These three functions determine what to write, how much, and how quickly
3762  * to send writes.
3763  */
3764 
3765 static boolean_t
3766 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3767 {
3768         /*
3769          * A buffer is *not* eligible for the L2ARC if it:
3770          * 1. belongs to a different spa.
3771          * 2. is already cached on the L2ARC.
3772          * 3. has an I/O in progress (it may be an incomplete read).
3773          * 4. is flagged not eligible (zfs property).
3774          */
3775         if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
3776             HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3777                 return (B_FALSE);
3778 
3779         return (B_TRUE);
3780 }
3781 
3782 static uint64_t
3783 l2arc_write_size(l2arc_dev_t *dev)
3784 {
3785         uint64_t size;
3786 
3787         size = dev->l2ad_write;
3788 
3789         if (arc_warm == B_FALSE)
3790                 size += dev->l2ad_boost;
3791 
3792         return (size);
3793 
3794 }
3795 
3796 static clock_t
3797 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
3798 {
3799         clock_t interval, next, now;
3800 
3801         /*
3802          * If the ARC lists are busy, increase our write rate; if the
3803          * lists are stale, idle back.  This is achieved by checking
3804          * how much we previously wrote - if it was more than half of
3805          * what we wanted, schedule the next write much sooner.
3806          */
3807         if (l2arc_feed_again && wrote > (wanted / 2))
3808                 interval = (hz * l2arc_feed_min_ms) / 1000;
3809         else
3810                 interval = hz * l2arc_feed_secs;
3811 
3812         now = ddi_get_lbolt();
3813         next = MAX(now, MIN(now + interval, began + interval));
3814 
3815         return (next);
3816 }
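
/*
 * Worked example: with hz = 100 and the usual defaults of
 * l2arc_feed_secs = 1, l2arc_feed_min_ms = 200 and l2arc_feed_again
 * enabled (values assumed here for illustration), a feed cycle that
 * wrote more than half of what it wanted schedules the next write
 * (100 * 200) / 1000 = 20 ticks after it began, while an idle cycle
 * waits the full 100 ticks.  The MAX(now, ...) clamp keeps the next
 * wakeup from landing in the past when a write runs long.
 */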
3817 
3818 static void
3819 l2arc_hdr_stat_add(void)
3820 {
3821         ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3822         ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3823 }
3824 
3825 static void
3826 l2arc_hdr_stat_remove(void)
3827 {
3828         ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3829         ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3830 }
3831 
3832 /*
3833  * Cycle through L2ARC devices.  This is how L2ARC load balances.
3834  * If a device is returned, this also returns holding the spa config lock.
3835  */
3836 static l2arc_dev_t *
3837 l2arc_dev_get_next(void)
3838 {
3839         l2arc_dev_t *first, *next = NULL;
3840 
3841         /*
3842          * Lock out the removal of spas (spa_namespace_lock), then removal
3843          * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
3844          * both locks will be dropped and a spa config lock held instead.
3845          */
3846         mutex_enter(&spa_namespace_lock);
3847         mutex_enter(&l2arc_dev_mtx);
3848 
3849         /* if there are no vdevs, there is nothing to do */
3850         if (l2arc_ndev == 0)
3851                 goto out;
3852 
3853         first = NULL;
3854         next = l2arc_dev_last;
3855         do {
3856                 /* loop around the list looking for a non-faulted vdev */
3857                 if (next == NULL) {
3858                         next = list_head(l2arc_dev_list);
3859                 } else {
3860                         next = list_next(l2arc_dev_list, next);
3861                         if (next == NULL)
3862                                 next = list_head(l2arc_dev_list);
3863                 }
3864 
3865                 /* if we have come back to the start, bail out */
3866                 if (first == NULL)
3867                         first = next;
3868                 else if (next == first)
3869                         break;
3870 
3871         } while (vdev_is_dead(next->l2ad_vdev));
3872 
3873         /* if we were unable to find any usable vdevs, return NULL */
3874         if (vdev_is_dead(next->l2ad_vdev))
3875                 next = NULL;
3876 
3877         l2arc_dev_last = next;
3878 
3879 out:
3880         mutex_exit(&l2arc_dev_mtx);
3881 
3882         /*
3883          * Grab the config lock to prevent the 'next' device from being
3884          * removed while we are writing to it.
3885          */
3886         if (next != NULL)
3887                 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
3888         mutex_exit(&spa_namespace_lock);
3889 
3890         return (next);
3891 }
3892 
3893 /*
3894  * Free buffers that were tagged for destruction.
3895  */
3896 static void
3897 l2arc_do_free_on_write()
3898 {
3899         list_t *buflist;
3900         l2arc_data_free_t *df, *df_prev;
3901 
3902         mutex_enter(&l2arc_free_on_write_mtx);
3903         buflist = l2arc_free_on_write;
3904 
3905         for (df = list_tail(buflist); df; df = df_prev) {
3906                 df_prev = list_prev(buflist, df);
3907                 ASSERT(df->l2df_data != NULL);
3908                 ASSERT(df->l2df_func != NULL);
3909                 df->l2df_func(df->l2df_data, df->l2df_size);
3910                 list_remove(buflist, df);
3911                 kmem_free(df, sizeof (l2arc_data_free_t));
3912         }
3913 
3914         mutex_exit(&l2arc_free_on_write_mtx);
3915 }
3916 
3917 /*
3918  * A write to a cache device has completed.  Update all headers to allow
3919  * reads from these buffers to begin.
3920  */
3921 static void
3922 l2arc_write_done(zio_t *zio)
3923 {
3924         l2arc_write_callback_t *cb;
3925         l2arc_dev_t *dev;
3926         list_t *buflist;
3927         arc_buf_hdr_t *head, *ab, *ab_prev;
3928         l2arc_buf_hdr_t *abl2;
3929         kmutex_t *hash_lock;
3930 
3931         cb = zio->io_private;
3932         ASSERT(cb != NULL);
3933         dev = cb->l2wcb_dev;
3934         ASSERT(dev != NULL);
3935         head = cb->l2wcb_head;
3936         ASSERT(head != NULL);
3937         buflist = dev->l2ad_buflist;
3938         ASSERT(buflist != NULL);
3939         DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3940             l2arc_write_callback_t *, cb);
3941 
3942         if (zio->io_error != 0)
3943                 ARCSTAT_BUMP(arcstat_l2_writes_error);
3944 
3945         mutex_enter(&l2arc_buflist_mtx);
3946 
3947         /*
3948          * All writes completed, or an error was hit.
3949          */
3950         for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3951                 ab_prev = list_prev(buflist, ab);
3952 
3953                 hash_lock = HDR_LOCK(ab);
3954                 if (!mutex_tryenter(hash_lock)) {
3955                         /*
3956                          * This buffer misses out.  It may be in the
3957                          * process of being evicted.  Its ARC_L2_WRITING
3958                          * flag will be left set, denying reads to it.
3959                          */
3960                         ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3961                         continue;
3962                 }
3963 
3964                 if (zio->io_error != 0) {
3965                         /*
3966                          * Error - drop L2ARC entry.
3967                          */
3968                         list_remove(buflist, ab);
3969                         abl2 = ab->b_l2hdr;
3970                         ab->b_l2hdr = NULL;
3971                         kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3972                         ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3973                 }
3974 
3975                 /*
3976                  * Allow ARC to begin reads to this L2ARC entry.
3977                  */
3978                 ab->b_flags &= ~ARC_L2_WRITING;
3979 
3980                 mutex_exit(hash_lock);
3981         }
3982 
3983         atomic_inc_64(&l2arc_writes_done);
3984         list_remove(buflist, head);
3985         kmem_cache_free(hdr_cache, head);
3986         mutex_exit(&l2arc_buflist_mtx);
3987 
3988         l2arc_do_free_on_write();
3989 
3990         kmem_free(cb, sizeof (l2arc_write_callback_t));
3991 }
3992 
3993 /*
3994  * A read to a cache device completed.  Validate buffer contents before
3995  * handing over to the regular ARC routines.
3996  */
3997 static void
3998 l2arc_read_done(zio_t *zio)
3999 {
4000         l2arc_read_callback_t *cb;
4001         arc_buf_hdr_t *hdr;
4002         arc_buf_t *buf;
4003         kmutex_t *hash_lock;
4004         int equal;
4005 
4006         ASSERT(zio->io_vd != NULL);
4007         ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4008 
4009         spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4010 
4011         cb = zio->io_private;
4012         ASSERT(cb != NULL);
4013         buf = cb->l2rcb_buf;
4014         ASSERT(buf != NULL);
4015 
4016         hash_lock = HDR_LOCK(buf->b_hdr);
4017         mutex_enter(hash_lock);
4018         hdr = buf->b_hdr;
4019         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4020 
4021         /*
4022          * Check this survived the L2ARC journey.
4023          */
4024         equal = arc_cksum_equal(buf);
4025         if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4026                 mutex_exit(hash_lock);
4027                 zio->io_private = buf;
4028                 zio->io_bp_copy = cb->l2rcb_bp;   /* XXX fix in L2ARC 2.0 */
4029                 zio->io_bp = &zio->io_bp_copy;        /* XXX fix in L2ARC 2.0 */
4030                 arc_read_done(zio);
4031         } else {
4032                 mutex_exit(hash_lock);
4033                 /*
4034                  * Buffer didn't survive caching.  Increment stats and
4035                  * reissue to the original storage device.
4036                  */
4037                 if (zio->io_error != 0) {
4038                         ARCSTAT_BUMP(arcstat_l2_io_error);
4039                 } else {
4040                         zio->io_error = EIO;
4041                 }
4042                 if (!equal)
4043                         ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4044 
4045                 /*
4046                  * If there's no waiter, issue an async i/o to the primary
4047                  * storage now.  If there *is* a waiter, the caller must
4048                  * issue the i/o in a context where it's OK to block.
4049                  */
4050                 if (zio->io_waiter == NULL) {
4051                         zio_t *pio = zio_unique_parent(zio);
4052 
4053                         ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4054 
4055                         zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4056                             buf->b_data, zio->io_size, arc_read_done, buf,
4057                             zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4058                 }
4059         }
4060 
4061         kmem_free(cb, sizeof (l2arc_read_callback_t));
4062 }
4063 
4064 /*
4065  * Return the list from which the L2ARC should next search for buffers
4066  * to cache.  Callers pass list_num values 0 through 3 to cycle through
4067  * the lists in the desired order, which can have a significant effect
4068  * on cache performance.
4069  *
4070  * Currently the metadata lists are searched first, MFU then MRU,
4071  * followed by the data lists in the same order.  The selected list is
4072  * returned locked, and its lock is passed back through 'lock'.
4073  */
4074 static list_t *
4075 l2arc_list_locked(int list_num, kmutex_t **lock)
4076 {
4077         list_t *list;
4078 
4079         ASSERT(list_num >= 0 && list_num <= 3);
4080 
4081         switch (list_num) {
4082         case 0:
4083                 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4084                 *lock = &arc_mfu->arcs_mtx;
4085                 break;
4086         case 1:
4087                 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4088                 *lock = &arc_mru->arcs_mtx;
4089                 break;
4090         case 2:
4091                 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4092                 *lock = &arc_mfu->arcs_mtx;
4093                 break;
4094         case 3:
4095                 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4096                 *lock = &arc_mru->arcs_mtx;
4097                 break;
4098         }
4099 
4100         ASSERT(!(MUTEX_HELD(*lock)));
4101         mutex_enter(*lock);
4102         return (list);
4103 }
4104 
4105 /*
4106  * Evict buffers from the device write hand forward by the distance
4107  * specified in bytes.  This distance may span populated buffers or
4108  * nothing at all; it clears a region of the L2ARC device for writing.
4109  * If the 'all' boolean is set, every buffer is evicted.
4110  */
4111 static void
4112 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4113 {
4114         list_t *buflist;
4115         l2arc_buf_hdr_t *abl2;
4116         arc_buf_hdr_t *ab, *ab_prev;
4117         kmutex_t *hash_lock;
4118         uint64_t taddr;
4119 
4120         buflist = dev->l2ad_buflist;
4121 
4122         if (buflist == NULL)
4123                 return;
4124 
4125         if (!all && dev->l2ad_first) {
4126                 /*
4127                  * This is the first sweep through the device.  There is
4128                  * nothing to evict.
4129                  */
4130                 return;
4131         }
4132 
4133         if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4134                 /*
4135                  * When nearing the end of the device, evict to the end
4136                  * before the device write hand jumps to the start.
4137                  */
4138                 taddr = dev->l2ad_end;
4139         } else {
4140                 taddr = dev->l2ad_hand + distance;
4141         }
4142         DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4143             uint64_t, taddr, boolean_t, all);
4144 
4145 top:
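             /*
              * Walk the buflist from the tail.  If a buffer's hash lock
              * cannot be acquired, drop the buflist lock, wait for the
              * hash lock to become free, and restart the scan from the
              * tail.
              */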
4146         mutex_enter(&l2arc_buflist_mtx);
4147         for (ab = list_tail(buflist); ab; ab = ab_prev) {
4148                 ab_prev = list_prev(buflist, ab);
4149 
4150                 hash_lock = HDR_LOCK(ab);
4151                 if (!mutex_tryenter(hash_lock)) {
4152                         /*
4153                          * Missed the hash lock.  Retry.
4154                          */
4155                         ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4156                         mutex_exit(&l2arc_buflist_mtx);
4157                         mutex_enter(hash_lock);
4158                         mutex_exit(hash_lock);
4159                         goto top;
4160                 }
4161 
4162                 if (HDR_L2_WRITE_HEAD(ab)) {
4163                         /*
4164                          * We hit a write head node.  Leave it for
4165                          * l2arc_write_done().
4166                          */
4167                         list_remove(buflist, ab);
4168                         mutex_exit(hash_lock);
4169                         continue;
4170                 }
4171 
4172                 if (!all && ab->b_l2hdr != NULL &&
4173                     (ab->b_l2hdr->b_daddr > taddr ||
4174                     ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4175                         /*
4176                          * We've evicted to the target address,
4177                          * or the end of the device.
4178                          */
4179                         mutex_exit(hash_lock);
4180                         break;
4181                 }
4182 
4183                 if (HDR_FREE_IN_PROGRESS(ab)) {
4184                         /*
4185                          * Already on the path to destruction.
4186                          */
4187                         mutex_exit(hash_lock);
4188                         continue;
4189                 }
4190 
4191                 if (ab->b_state == arc_l2c_only) {
4192                         ASSERT(!HDR_L2_READING(ab));
4193                         /*
4194                          * This doesn't exist in the ARC.  Destroy.
4195                          * arc_hdr_destroy() will call list_remove()
4196                          * and decrement arcstat_l2_size.
4197                          */
4198                         arc_change_state(arc_anon, ab, hash_lock);
4199                         arc_hdr_destroy(ab);
4200                 } else {
4201                         /*
4202                          * Invalidate issued or about to be issued
4203                          * reads, since we may be about to write
4204                          * over this location.
4205                          */
4206                         if (HDR_L2_READING(ab)) {
4207                                 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4208                                 ab->b_flags |= ARC_L2_EVICTED;
4209                         }
4210 
4211                         /*
4212                          * Tell ARC this no longer exists in L2ARC.
4213                          */
4214                         if (ab->b_l2hdr != NULL) {
4215                                 abl2 = ab->b_l2hdr;
4216                                 ab->b_l2hdr = NULL;
4217                                 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4218                                 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4219                         }
4220                         list_remove(buflist, ab);
4221 
4222                         /*
4223                          * This may have been left over after a
4224                          * failed write.
4225                          */
4226                         ab->b_flags &= ~ARC_L2_WRITING;
4227                 }
4228                 mutex_exit(hash_lock);
4229         }
4230         mutex_exit(&l2arc_buflist_mtx);
4231 
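             /*
              * Credit back the space between the old and new eviction
              * hands on the cache vdev, and record the new eviction hand.
              */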
4232         vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4233         dev->l2ad_evict = taddr;
4234 }
4235 
4236 /*
4237  * Find and write ARC buffers to the L2ARC device.
4238  *
4239  * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4240  * for reading until they have completed writing.
4241  */
4242 static uint64_t
4243 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4244 {
4245         arc_buf_hdr_t *ab, *ab_prev, *head;
4246         l2arc_buf_hdr_t *hdrl2;
4247         list_t *list;
4248         uint64_t passed_sz, write_sz, buf_sz, headroom;
4249         void *buf_data;
4250         kmutex_t *hash_lock, *list_lock;
4251         boolean_t have_lock, full;
4252         l2arc_write_callback_t *cb;
4253         zio_t *pio, *wzio;
4254         uint64_t guid = spa_load_guid(spa);
4255 
4256         ASSERT(dev->l2ad_vdev != NULL);
4257 
4258         pio = NULL;
4259         write_sz = 0;
4260         full = B_FALSE;
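             /*
              * Allocate a header to serve as the write head marker for this
              * batch; it is inserted on the buflist only if a write is issued.
              */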
4261         head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4262         head->b_flags |= ARC_L2_WRITE_HEAD;
4263 
4264         /*
4265          * Copy buffers for L2ARC writing.
4266          */
4267         mutex_enter(&l2arc_buflist_mtx);
4268         for (int try = 0; try <= 3; try++) {
4269                 list = l2arc_list_locked(try, &list_lock);
4270                 passed_sz = 0;
4271 
4272                 /*
4273                  * L2ARC fast warmup.
4274                  *
4275                  * Until the ARC is warm and starts to evict, read from the
4276                  * head of the ARC lists rather than the tail.
4277                  */
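                     /*
                      * headroom limits how many bytes of the list are
                      * searched before giving up on this pass; it is the
                      * target write size scaled by l2arc_headroom.
                      */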
4278                 headroom = target_sz * l2arc_headroom;
4279                 if (arc_warm == B_FALSE)
4280                         ab = list_head(list);
4281                 else
4282                         ab = list_tail(list);
4283 
4284                 for (; ab; ab = ab_prev) {
4285                         if (arc_warm == B_FALSE)
4286                                 ab_prev = list_next(list, ab);
4287                         else
4288                                 ab_prev = list_prev(list, ab);
4289 
4290                         hash_lock = HDR_LOCK(ab);
4291                         have_lock = MUTEX_HELD(hash_lock);
4292                         if (!have_lock && !mutex_tryenter(hash_lock)) {
4293                                 /*
4294                                  * Skip this buffer rather than waiting.
4295                                  */
4296                                 continue;
4297                         }
4298 
4299                         passed_sz += ab->b_size;
4300                         if (passed_sz > headroom) {
4301                                 /*
4302                                  * Searched too far.
4303                                  */
4304                                 mutex_exit(hash_lock);
4305                                 break;
4306                         }
4307 
4308                         if (!l2arc_write_eligible(guid, ab)) {
4309                                 mutex_exit(hash_lock);
4310                                 continue;
4311                         }
4312 
4313                         if ((write_sz + ab->b_size) > target_sz) {
4314                                 full = B_TRUE;
4315                                 mutex_exit(hash_lock);
4316                                 break;
4317                         }
4318 
4319                         if (pio == NULL) {
4320                                 /*
4321                                  * Insert a dummy header on the buflist so
4322                                  * l2arc_write_done() can find where the
4323                                  * write buffers begin without searching.
4324                                  */
4325                                 list_insert_head(dev->l2ad_buflist, head);
4326 
4327                                 cb = kmem_alloc(
4328                                     sizeof (l2arc_write_callback_t), KM_SLEEP);
4329                                 cb->l2wcb_dev = dev;
4330                                 cb->l2wcb_head = head;
4331                                 pio = zio_root(spa, l2arc_write_done, cb,
4332                                     ZIO_FLAG_CANFAIL);
4333                         }
4334 
4335                         /*
4336                          * Create and add a new L2ARC header.
4337                          */
4338                         hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4339                         hdrl2->b_dev = dev;
4340                         hdrl2->b_daddr = dev->l2ad_hand;
4341 
4342                         ab->b_flags |= ARC_L2_WRITING;
4343                         ab->b_l2hdr = hdrl2;
4344                         list_insert_head(dev->l2ad_buflist, ab);
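                             /*
                              * Capture the data pointer and size now, while
                              * the hash lock is still held; the write itself
                              * is issued after the lock is dropped.
                              */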
4345                         buf_data = ab->b_buf->b_data;
4346                         buf_sz = ab->b_size;
4347 
4348                         /*
4349                          * Compute and store the buffer cksum before
4350                          * writing.  On debug builds, the cksum is verified first.
4351                          */
4352                         arc_cksum_verify(ab->b_buf);
4353                         arc_cksum_compute(ab->b_buf, B_TRUE);
4354 
4355                         mutex_exit(hash_lock);
4356 
4357                         wzio = zio_write_phys(pio, dev->l2ad_vdev,
4358                             dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4359                             NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4360                             ZIO_FLAG_CANFAIL, B_FALSE);
4361 
4362                         DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4363                             zio_t *, wzio);
4364                         (void) zio_nowait(wzio);
4365 
4366                         /*
4367                          * Keep the clock hand suitably device-aligned.
4368                          */
4369                         buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4370 
4371                         write_sz += buf_sz;
4372                         dev->l2ad_hand += buf_sz;
4373                 }
4374 
4375                 mutex_exit(list_lock);
4376 
4377                 if (full == B_TRUE)
4378                         break;
4379         }
4380         mutex_exit(&l2arc_buflist_mtx);
4381 
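             /*
              * If no write zio was created, nothing was added to the buflist;
              * free the unused write head and report that nothing was written.
              */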
4382         if (pio == NULL) {
4383                 ASSERT3U(write_sz, ==, 0);
4384                 kmem_cache_free(hdr_cache, head);
4385                 return (0);
4386         }
4387 
4388         ASSERT3U(write_sz, <=, target_sz);
4389         ARCSTAT_BUMP(arcstat_l2_writes_sent);
4390         ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4391         ARCSTAT_INCR(arcstat_l2_size, write_sz);
4392         vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
4393 
4394         /*
4395          * Bump device hand to the device start if it is approaching the end.
4396          * l2arc_evict() will already have evicted ahead for this case.
4397          */
4398         if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4399                 vdev_space_update(dev->l2ad_vdev,
4400                     dev->l2ad_end - dev->l2ad_hand, 0, 0);
4401                 dev->l2ad_hand = dev->l2ad_start;
4402                 dev->l2ad_evict = dev->l2ad_start;
4403                 dev->l2ad_first = B_FALSE;
4404         }
4405 
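             /*
              * Mark the device as busy writing while we wait for the entire
              * batch of write zios issued above to complete.
              */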
4406         dev->l2ad_writing = B_TRUE;
4407         (void) zio_wait(pio);
4408         dev->l2ad_writing = B_FALSE;
4409 
4410         return (write_sz);
4411 }
4412 
4413 /*
4414  * This thread feeds the L2ARC at regular intervals.  This is the beating
4415  * heart of the L2ARC.
4416  */
4417 static void
4418 l2arc_feed_thread(void)
4419 {
4420         callb_cpr_t cpr;
4421         l2arc_dev_t *dev;
4422         spa_t *spa;
4423         uint64_t size, wrote;
4424         clock_t begin, next = ddi_get_lbolt();
4425 
4426         CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4427 
4428         mutex_enter(&l2arc_feed_thr_lock);
4429 
4430         while (l2arc_thread_exit == 0) {
4431                 CALLB_CPR_SAFE_BEGIN(&cpr);
4432                 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4433                     next);
4434                 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
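                     /*
                      * By default wake up again in about a second; the feed
                      * logic below may replace this interval (e.g. via
                      * l2arc_write_interval()).
                      */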
4435                 next = ddi_get_lbolt() + hz;
4436 
4437                 /*
4438                  * Quick check for L2ARC devices.
4439                  */
4440                 mutex_enter(&l2arc_dev_mtx);
4441                 if (l2arc_ndev == 0) {
4442                         mutex_exit(&l2arc_dev_mtx);
4443                         continue;
4444                 }
4445                 mutex_exit(&l2arc_dev_mtx);
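                     /*
                      * Record when this feed cycle began; it is used below
                      * to compute the interval until the next write.
                      */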
4446                 begin = ddi_get_lbolt();
4447 
4448                 /*
4449                  * This selects the next l2arc device to write to, and in
4450                  * doing so the next spa to feed from: dev->l2ad_spa.  It
4451                  * returns NULL if there are no longer any l2arc devices
4452                  * or if they are all faulted.
4453                  *
4454                  * If a device is returned, its spa's config lock is also
4455                  * held to prevent device removal.  l2arc_dev_get_next()
4456                  * will grab and release l2arc_dev_mtx.
4457                  */
4458                 if ((dev = l2arc_dev_get_next()) == NULL)
4459                         continue;
4460 
4461                 spa = dev->l2ad_spa;
4462                 ASSERT(spa != NULL);
4463 
4464                 /*
4465                  * If the pool is read-only then force the feed thread to
4466                  * sleep a little longer.
4467                  */
4468                 if (!spa_writeable(spa)) {
4469                         next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
4470                         spa_config_exit(spa, SCL_L2ARC, dev);
4471                         continue;
4472                 }
4473 
4474                 /*
4475                  * Avoid contributing to memory pressure.
4476                  */
4477                 if (arc_reclaim_needed()) {
4478                         ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4479                         spa_config_exit(spa, SCL_L2ARC, dev);
4480                         continue;
4481                 }
4482 
4483                 ARCSTAT_BUMP(arcstat_l2_feeds);
4484 
4485                 size = l2arc_write_size(dev);
4486 
4487                 /*
4488                  * Evict L2ARC buffers that will be overwritten.
4489                  */
4490                 l2arc_evict(dev, size, B_FALSE);
4491 
4492                 /*
4493                  * Write ARC buffers.
4494                  */
4495                 wrote = l2arc_write_buffers(spa, dev, size);
4496 
4497                 /*
4498                  * Calculate interval between writes.
4499                  */
4500                 next = l2arc_write_interval(begin, size, wrote);
4501                 spa_config_exit(spa, SCL_L2ARC, dev);
4502         }
4503 
4504         l2arc_thread_exit = 0;
4505         cv_broadcast(&l2arc_feed_thr_cv);
4506         CALLB_CPR_EXIT(&cpr);               /* drops l2arc_feed_thr_lock */
4507         thread_exit();
4508 }
4509 
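     /*
      * Determine whether the given vdev is currently registered as an
      * L2ARC device.
      */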
4510 boolean_t
4511 l2arc_vdev_present(vdev_t *vd)
4512 {
4513         l2arc_dev_t *dev;
4514 
4515         mutex_enter(&l2arc_dev_mtx);
4516         for (dev = list_head(l2arc_dev_list); dev != NULL;
4517             dev = list_next(l2arc_dev_list, dev)) {
4518                 if (dev->l2ad_vdev == vd)
4519                         break;
4520         }
4521         mutex_exit(&l2arc_dev_mtx);
4522 
4523         return (dev != NULL);
4524 }
4525 
4526 /*
4527  * Add a vdev for use by the L2ARC.  By this point the spa has already
4528  * validated the vdev and opened it.
4529  */
4530 void
4531 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
4532 {
4533         l2arc_dev_t *adddev;
4534 
4535         ASSERT(!l2arc_vdev_present(vd));
4536 
4537         /*
4538          * Create a new l2arc device entry.
4539          */
4540         adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4541         adddev->l2ad_spa = spa;
4542         adddev->l2ad_vdev = vd;
4543         adddev->l2ad_write = l2arc_write_max;
4544         adddev->l2ad_boost = l2arc_write_boost;
4545         adddev->l2ad_start = VDEV_LABEL_START_SIZE;
4546         adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
4547         adddev->l2ad_hand = adddev->l2ad_start;
4548         adddev->l2ad_evict = adddev->l2ad_start;
4549         adddev->l2ad_first = B_TRUE;
4550         adddev->l2ad_writing = B_FALSE;
4551         ASSERT3U(adddev->l2ad_write, >, 0);
4552 
4553         /*
4554          * This is a list of all ARC buffers that are still valid on the
4555          * device.
4556          */
4557         adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4558         list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4559             offsetof(arc_buf_hdr_t, b_l2node));
4560 
4561         vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
4562 
4563         /*
4564          * Add device to global list
4565          */
4566         mutex_enter(&l2arc_dev_mtx);
4567         list_insert_head(l2arc_dev_list, adddev);
4568         atomic_inc_64(&l2arc_ndev);
4569         mutex_exit(&l2arc_dev_mtx);
4570 }
4571 
4572 /*
4573  * Remove a vdev from the L2ARC.
4574  */
4575 void
4576 l2arc_remove_vdev(vdev_t *vd)
4577 {
4578         l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4579 
4580         /*
4581          * Find the device by vdev
4582          */
4583         mutex_enter(&l2arc_dev_mtx);
4584         for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4585                 nextdev = list_next(l2arc_dev_list, dev);
4586                 if (vd == dev->l2ad_vdev) {
4587                         remdev = dev;
4588                         break;
4589                 }
4590         }
4591         ASSERT(remdev != NULL);
4592 
4593         /*
4594          * Remove device from global list
4595          */
4596         list_remove(l2arc_dev_list, remdev);
4597         l2arc_dev_last = NULL;          /* may have been invalidated */
4598         atomic_dec_64(&l2arc_ndev);
4599         mutex_exit(&l2arc_dev_mtx);
4600 
4601         /*
4602          * Clear all buflists and ARC references: an L2ARC device flush.
4603          */
4604         l2arc_evict(remdev, 0, B_TRUE);
4605         list_destroy(remdev->l2ad_buflist);
4606         kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4607         kmem_free(remdev, sizeof (l2arc_dev_t));
4608 }
4609 
4610 void
4611 l2arc_init(void)
4612 {
4613         l2arc_thread_exit = 0;
4614         l2arc_ndev = 0;
4615         l2arc_writes_sent = 0;
4616         l2arc_writes_done = 0;
4617 
4618         mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4619         cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4620         mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4621         mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4622         mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4623 
4624         l2arc_dev_list = &L2ARC_dev_list;
4625         l2arc_free_on_write = &L2ARC_free_on_write;
4626         list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4627             offsetof(l2arc_dev_t, l2ad_node));
4628         list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4629             offsetof(l2arc_data_free_t, l2df_list_node));
4630 }
4631 
4632 void
4633 l2arc_fini(void)
4634 {
4635         /*
4636          * This is called from dmu_fini(), which is called from spa_fini().
4637          * Because of this, we can assume that all l2arc devices have
4638          * already been removed when the pools themselves were removed.
4639          */
4640 
4641         l2arc_do_free_on_write();
4642 
4643         mutex_destroy(&l2arc_feed_thr_lock);
4644         cv_destroy(&l2arc_feed_thr_cv);
4645         mutex_destroy(&l2arc_dev_mtx);
4646         mutex_destroy(&l2arc_buflist_mtx);
4647         mutex_destroy(&l2arc_free_on_write_mtx);
4648 
4649         list_destroy(l2arc_dev_list);
4650         list_destroy(l2arc_free_on_write);
4651 }
4652 
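     /*
      * Start the L2ARC feed thread.  This is a no-op unless pools are
      * opened writeable (spa_mode_global includes FWRITE).
      */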
4653 void
4654 l2arc_start(void)
4655 {
4656         if (!(spa_mode_global & FWRITE))
4657                 return;
4658 
4659         (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4660             TS_RUN, minclsyspri);
4661 }
4662 
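     /*
      * Signal the L2ARC feed thread to exit and wait until it has done so.
      */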
4663 void
4664 l2arc_stop(void)
4665 {
4666         if (!(spa_mode_global & FWRITE))
4667                 return;
4668 
4669         mutex_enter(&l2arc_feed_thr_lock);
4670         cv_signal(&l2arc_feed_thr_cv);      /* kick thread out of startup */
4671         l2arc_thread_exit = 1;
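             /*
              * The feed thread clears l2arc_thread_exit and broadcasts on
              * the cv just before it terminates; wait here for that
              * acknowledgement.
              */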
4672         while (l2arc_thread_exit != 0)
4673                 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4674         mutex_exit(&l2arc_feed_thr_lock);
4675 }