/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory. This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about. Our cache is not so simple. At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them. Blocks are only evictable
 *    when there are no external references active. This makes
 *    eviction far more problematic: we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space. In these circumstances we are unable to adjust the cache
 *    size. To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss. Our model has a variable sized cache. It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 *    elements of the cache are therefore exactly the same size. So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict. In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes). We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D.
Modha, FAST 2003 72 */ 73 74 /* 75 * The locking model: 76 * 77 * A new reference to a cache buffer can be obtained in two 78 * ways: 1) via a hash table lookup using the DVA as a key, 79 * or 2) via one of the ARC lists. The arc_read() interface 80 * uses method 1, while the internal arc algorithms for 81 * adjusting the cache use method 2. We therefore provide two 82 * types of locks: 1) the hash table lock array, and 2) the 83 * arc list locks. 84 * 85 * Buffers do not have their own mutexes, rather they rely on the 86 * hash table mutexes for the bulk of their protection (i.e. most 87 * fields in the arc_buf_hdr_t are protected by these mutexes). 88 * 89 * buf_hash_find() returns the appropriate mutex (held) when it 90 * locates the requested buffer in the hash table. It returns 91 * NULL for the mutex if the buffer was not in the table. 92 * 93 * buf_hash_remove() expects the appropriate hash mutex to be 94 * already held before it is invoked. 95 * 96 * Each arc state also has a mutex which is used to protect the 97 * buffer list associated with the state. When attempting to 98 * obtain a hash table lock while holding an arc list lock you 99 * must use: mutex_tryenter() to avoid deadlock. Also note that 100 * the active state mutex must be held before the ghost state mutex. 101 * 102 * Arc buffers may have an associated eviction callback function. 103 * This function will be invoked prior to removing the buffer (e.g. 104 * in arc_do_user_evicts()). Note however that the data associated 105 * with the buffer may be evicted prior to the callback. The callback 106 * must be made with *no locks held* (to prevent deadlock). Additionally, 107 * the users of callbacks must ensure that their private data is 108 * protected from simultaneous callbacks from arc_clear_callback() 109 * and arc_do_user_evicts(). 110 * 111 * Note that the majority of the performance stats are manipulated 112 * with atomic operations. 113 * 114 * The L2ARC uses the l2ad_mtx on each vdev for the following: 115 * 116 * - L2ARC buflist creation 117 * - L2ARC buflist eviction 118 * - L2ARC write completion, which walks L2ARC buflists 119 * - ARC header destruction, as it removes from L2ARC buflists 120 * - ARC header release, as it removes from L2ARC buflists 121 */ 122 123 #include <sys/spa.h> 124 #include <sys/zio.h> 125 #include <sys/zio_compress.h> 126 #include <sys/zfs_context.h> 127 #include <sys/arc.h> 128 #include <sys/refcount.h> 129 #include <sys/vdev.h> 130 #include <sys/vdev_impl.h> 131 #include <sys/dsl_pool.h> 132 #include <sys/multilist.h> 133 #ifdef _KERNEL 134 #include <sys/vmsystm.h> 135 #include <vm/anon.h> 136 #include <sys/fs/swapnode.h> 137 #include <sys/dnlc.h> 138 #endif 139 #include <sys/callb.h> 140 #include <sys/kstat.h> 141 #include <zfs_fletcher.h> 142 143 #ifndef _KERNEL 144 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ 145 boolean_t arc_watch = B_FALSE; 146 int arc_procfd; 147 #endif 148 149 static kmutex_t arc_reclaim_lock; 150 static kcondvar_t arc_reclaim_thread_cv; 151 static boolean_t arc_reclaim_thread_exit; 152 static kcondvar_t arc_reclaim_waiters_cv; 153 154 static kmutex_t arc_user_evicts_lock; 155 static kcondvar_t arc_user_evicts_cv; 156 static boolean_t arc_user_evicts_thread_exit; 157 158 uint_t arc_reduce_dnlc_percent = 3; 159 160 /* 161 * The number of headers to evict in arc_evict_state_impl() before 162 * dropping the sublist lock and evicting from another sublist. A lower 163 * value means we're more likely to evict the "correct" header (i.e. 
the 164 * oldest header in the arc state), but comes with higher overhead 165 * (i.e. more invocations of arc_evict_state_impl()). 166 */ 167 int zfs_arc_evict_batch_limit = 10; 168 169 /* 170 * The number of sublists used for each of the arc state lists. If this 171 * is not set to a suitable value by the user, it will be configured to 172 * the number of CPUs on the system in arc_init(). 173 */ 174 int zfs_arc_num_sublists_per_state = 0; 175 176 /* number of seconds before growing cache again */ 177 static int arc_grow_retry = 60; 178 179 /* shift of arc_c for calculating overflow limit in arc_get_data_buf */ 180 int zfs_arc_overflow_shift = 8; 181 182 /* shift of arc_c for calculating both min and max arc_p */ 183 static int arc_p_min_shift = 4; 184 185 /* log2(fraction of arc to reclaim) */ 186 static int arc_shrink_shift = 7; 187 188 /* 189 * log2(fraction of ARC which must be free to allow growing). 190 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory, 191 * when reading a new block into the ARC, we will evict an equal-sized block 192 * from the ARC. 193 * 194 * This must be less than arc_shrink_shift, so that when we shrink the ARC, 195 * we will still not allow it to grow. 196 */ 197 int arc_no_grow_shift = 5; 198 199 200 /* 201 * minimum lifespan of a prefetch block in clock ticks 202 * (initialized in arc_init()) 203 */ 204 static int arc_min_prefetch_lifespan; 205 206 /* 207 * If this percent of memory is free, don't throttle. 208 */ 209 int arc_lotsfree_percent = 10; 210 211 static int arc_dead; 212 213 /* 214 * The arc has filled available memory and has now warmed up. 215 */ 216 static boolean_t arc_warm; 217 218 /* 219 * These tunables are for performance analysis. 220 */ 221 uint64_t zfs_arc_max; 222 uint64_t zfs_arc_min; 223 uint64_t zfs_arc_meta_limit = 0; 224 uint64_t zfs_arc_meta_min = 0; 225 int zfs_arc_grow_retry = 0; 226 int zfs_arc_shrink_shift = 0; 227 int zfs_arc_p_min_shift = 0; 228 int zfs_disable_dup_eviction = 0; 229 int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */ 230 231 /* 232 * Note that buffers can be in one of 6 states: 233 * ARC_anon - anonymous (discussed below) 234 * ARC_mru - recently used, currently cached 235 * ARC_mru_ghost - recentely used, no longer in cache 236 * ARC_mfu - frequently used, currently cached 237 * ARC_mfu_ghost - frequently used, no longer in cache 238 * ARC_l2c_only - exists in L2ARC but not other states 239 * When there are no active references to the buffer, they are 240 * are linked onto a list in one of these arc states. These are 241 * the only buffers that can be evicted or deleted. Within each 242 * state there are multiple lists, one for meta-data and one for 243 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes, 244 * etc.) is tracked separately so that it can be managed more 245 * explicitly: favored over data, limited explicitly. 246 * 247 * Anonymous buffers are buffers that are not associated with 248 * a DVA. These are buffers that hold dirty block copies 249 * before they are written to stable storage. By definition, 250 * they are "ref'd" and are considered part of arc_mru 251 * that cannot be freed. Generally, they will aquire a DVA 252 * as they are written and migrate onto the arc_mru list. 253 * 254 * The ARC_l2c_only state is for buffers that are in the second 255 * level ARC but no longer in any of the ARC_m* lists. The second 256 * level ARC itself may also contain buffers that are in any of 257 * the ARC_m* states - meaning that a buffer can exist in two 258 * places. 
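 *
 * (As a rough illustration, not a strict state machine: a block that is
 * demand-read, re-read while cached, and finally evicted typically moves
 * through the states like this:
 *
 *	(new)         -> arc_mru        first read caches it
 *	arc_mru       -> arc_mfu        a later hit promotes it
 *	arc_mfu       -> arc_mfu_ghost  data evicted, header retained
 *	arc_mfu_ghost -> (freed)        header itself eventually evicted
 *
 * A later hit on the ghost header is used as a hint to re-balance the
 * cache between its MRU and MFU portions when the block is read back in.)
 *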
The reason for the ARC_l2c_only state is to keep the 259 * buffer header in the hash table, so that reads that hit the 260 * second level ARC benefit from these fast lookups. 261 */ 262 263 typedef struct arc_state { 264 /* 265 * list of evictable buffers 266 */ 267 multilist_t arcs_list[ARC_BUFC_NUMTYPES]; 268 /* 269 * total amount of evictable data in this state 270 */ 271 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; 272 /* 273 * total amount of data in this state; this includes: evictable, 274 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA. 275 */ 276 refcount_t arcs_size; 277 } arc_state_t; 278 279 /* The 6 states: */ 280 static arc_state_t ARC_anon; 281 static arc_state_t ARC_mru; 282 static arc_state_t ARC_mru_ghost; 283 static arc_state_t ARC_mfu; 284 static arc_state_t ARC_mfu_ghost; 285 static arc_state_t ARC_l2c_only; 286 287 typedef struct arc_stats { 288 kstat_named_t arcstat_hits; 289 kstat_named_t arcstat_misses; 290 kstat_named_t arcstat_demand_data_hits; 291 kstat_named_t arcstat_demand_data_misses; 292 kstat_named_t arcstat_demand_metadata_hits; 293 kstat_named_t arcstat_demand_metadata_misses; 294 kstat_named_t arcstat_prefetch_data_hits; 295 kstat_named_t arcstat_prefetch_data_misses; 296 kstat_named_t arcstat_prefetch_metadata_hits; 297 kstat_named_t arcstat_prefetch_metadata_misses; 298 kstat_named_t arcstat_mru_hits; 299 kstat_named_t arcstat_mru_ghost_hits; 300 kstat_named_t arcstat_mfu_hits; 301 kstat_named_t arcstat_mfu_ghost_hits; 302 kstat_named_t arcstat_deleted; 303 /* 304 * Number of buffers that could not be evicted because the hash lock 305 * was held by another thread. The lock may not necessarily be held 306 * by something using the same buffer, since hash locks are shared 307 * by multiple buffers. 308 */ 309 kstat_named_t arcstat_mutex_miss; 310 /* 311 * Number of buffers skipped because they have I/O in progress, are 312 * indrect prefetch buffers that have not lived long enough, or are 313 * not from the spa we're trying to evict from. 314 */ 315 kstat_named_t arcstat_evict_skip; 316 /* 317 * Number of times arc_evict_state() was unable to evict enough 318 * buffers to reach it's target amount. 319 */ 320 kstat_named_t arcstat_evict_not_enough; 321 kstat_named_t arcstat_evict_l2_cached; 322 kstat_named_t arcstat_evict_l2_eligible; 323 kstat_named_t arcstat_evict_l2_ineligible; 324 kstat_named_t arcstat_evict_l2_skip; 325 kstat_named_t arcstat_hash_elements; 326 kstat_named_t arcstat_hash_elements_max; 327 kstat_named_t arcstat_hash_collisions; 328 kstat_named_t arcstat_hash_chains; 329 kstat_named_t arcstat_hash_chain_max; 330 kstat_named_t arcstat_p; 331 kstat_named_t arcstat_c; 332 kstat_named_t arcstat_c_min; 333 kstat_named_t arcstat_c_max; 334 kstat_named_t arcstat_size; 335 /* 336 * Number of bytes consumed by internal ARC structures necessary 337 * for tracking purposes; these structures are not actually 338 * backed by ARC buffers. This includes arc_buf_hdr_t structures 339 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only 340 * caches), and arc_buf_t structures (allocated via arc_buf_t 341 * cache). 342 */ 343 kstat_named_t arcstat_hdr_size; 344 /* 345 * Number of bytes consumed by ARC buffers of type equal to 346 * ARC_BUFC_DATA. This is generally consumed by buffers backing 347 * on disk user data (e.g. plain file contents). 348 */ 349 kstat_named_t arcstat_data_size; 350 /* 351 * Number of bytes consumed by ARC buffers of type equal to 352 * ARC_BUFC_METADATA. 
This is generally consumed by buffers 353 * backing on disk data that is used for internal ZFS 354 * structures (e.g. ZAP, dnode, indirect blocks, etc). 355 */ 356 kstat_named_t arcstat_metadata_size; 357 /* 358 * Number of bytes consumed by various buffers and structures 359 * not actually backed with ARC buffers. This includes bonus 360 * buffers (allocated directly via zio_buf_* functions), 361 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t 362 * cache), and dnode_t structures (allocated via dnode_t cache). 363 */ 364 kstat_named_t arcstat_other_size; 365 /* 366 * Total number of bytes consumed by ARC buffers residing in the 367 * arc_anon state. This includes *all* buffers in the arc_anon 368 * state; e.g. data, metadata, evictable, and unevictable buffers 369 * are all included in this value. 370 */ 371 kstat_named_t arcstat_anon_size; 372 /* 373 * Number of bytes consumed by ARC buffers that meet the 374 * following criteria: backing buffers of type ARC_BUFC_DATA, 375 * residing in the arc_anon state, and are eligible for eviction 376 * (e.g. have no outstanding holds on the buffer). 377 */ 378 kstat_named_t arcstat_anon_evictable_data; 379 /* 380 * Number of bytes consumed by ARC buffers that meet the 381 * following criteria: backing buffers of type ARC_BUFC_METADATA, 382 * residing in the arc_anon state, and are eligible for eviction 383 * (e.g. have no outstanding holds on the buffer). 384 */ 385 kstat_named_t arcstat_anon_evictable_metadata; 386 /* 387 * Total number of bytes consumed by ARC buffers residing in the 388 * arc_mru state. This includes *all* buffers in the arc_mru 389 * state; e.g. data, metadata, evictable, and unevictable buffers 390 * are all included in this value. 391 */ 392 kstat_named_t arcstat_mru_size; 393 /* 394 * Number of bytes consumed by ARC buffers that meet the 395 * following criteria: backing buffers of type ARC_BUFC_DATA, 396 * residing in the arc_mru state, and are eligible for eviction 397 * (e.g. have no outstanding holds on the buffer). 398 */ 399 kstat_named_t arcstat_mru_evictable_data; 400 /* 401 * Number of bytes consumed by ARC buffers that meet the 402 * following criteria: backing buffers of type ARC_BUFC_METADATA, 403 * residing in the arc_mru state, and are eligible for eviction 404 * (e.g. have no outstanding holds on the buffer). 405 */ 406 kstat_named_t arcstat_mru_evictable_metadata; 407 /* 408 * Total number of bytes that *would have been* consumed by ARC 409 * buffers in the arc_mru_ghost state. The key thing to note 410 * here, is the fact that this size doesn't actually indicate 411 * RAM consumption. The ghost lists only consist of headers and 412 * don't actually have ARC buffers linked off of these headers. 413 * Thus, *if* the headers had associated ARC buffers, these 414 * buffers *would have* consumed this number of bytes. 415 */ 416 kstat_named_t arcstat_mru_ghost_size; 417 /* 418 * Number of bytes that *would have been* consumed by ARC 419 * buffers that are eligible for eviction, of type 420 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state. 421 */ 422 kstat_named_t arcstat_mru_ghost_evictable_data; 423 /* 424 * Number of bytes that *would have been* consumed by ARC 425 * buffers that are eligible for eviction, of type 426 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state. 427 */ 428 kstat_named_t arcstat_mru_ghost_evictable_metadata; 429 /* 430 * Total number of bytes consumed by ARC buffers residing in the 431 * arc_mfu state. This includes *all* buffers in the arc_mfu 432 * state; e.g. 
data, metadata, evictable, and unevictable buffers 433 * are all included in this value. 434 */ 435 kstat_named_t arcstat_mfu_size; 436 /* 437 * Number of bytes consumed by ARC buffers that are eligible for 438 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu 439 * state. 440 */ 441 kstat_named_t arcstat_mfu_evictable_data; 442 /* 443 * Number of bytes consumed by ARC buffers that are eligible for 444 * eviction, of type ARC_BUFC_METADATA, and reside in the 445 * arc_mfu state. 446 */ 447 kstat_named_t arcstat_mfu_evictable_metadata; 448 /* 449 * Total number of bytes that *would have been* consumed by ARC 450 * buffers in the arc_mfu_ghost state. See the comment above 451 * arcstat_mru_ghost_size for more details. 452 */ 453 kstat_named_t arcstat_mfu_ghost_size; 454 /* 455 * Number of bytes that *would have been* consumed by ARC 456 * buffers that are eligible for eviction, of type 457 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state. 458 */ 459 kstat_named_t arcstat_mfu_ghost_evictable_data; 460 /* 461 * Number of bytes that *would have been* consumed by ARC 462 * buffers that are eligible for eviction, of type 463 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state. 464 */ 465 kstat_named_t arcstat_mfu_ghost_evictable_metadata; 466 kstat_named_t arcstat_l2_hits; 467 kstat_named_t arcstat_l2_misses; 468 kstat_named_t arcstat_l2_feeds; 469 kstat_named_t arcstat_l2_rw_clash; 470 kstat_named_t arcstat_l2_read_bytes; 471 kstat_named_t arcstat_l2_write_bytes; 472 kstat_named_t arcstat_l2_writes_sent; 473 kstat_named_t arcstat_l2_writes_done; 474 kstat_named_t arcstat_l2_writes_error; 475 kstat_named_t arcstat_l2_writes_lock_retry; 476 kstat_named_t arcstat_l2_evict_lock_retry; 477 kstat_named_t arcstat_l2_evict_reading; 478 kstat_named_t arcstat_l2_evict_l1cached; 479 kstat_named_t arcstat_l2_free_on_write; 480 kstat_named_t arcstat_l2_cdata_free_on_write; 481 kstat_named_t arcstat_l2_abort_lowmem; 482 kstat_named_t arcstat_l2_cksum_bad; 483 kstat_named_t arcstat_l2_io_error; 484 kstat_named_t arcstat_l2_size; 485 kstat_named_t arcstat_l2_asize; 486 kstat_named_t arcstat_l2_hdr_size; 487 kstat_named_t arcstat_l2_compress_successes; 488 kstat_named_t arcstat_l2_compress_zeros; 489 kstat_named_t arcstat_l2_compress_failures; 490 kstat_named_t arcstat_memory_throttle_count; 491 kstat_named_t arcstat_duplicate_buffers; 492 kstat_named_t arcstat_duplicate_buffers_size; 493 kstat_named_t arcstat_duplicate_reads; 494 kstat_named_t arcstat_meta_used; 495 kstat_named_t arcstat_meta_limit; 496 kstat_named_t arcstat_meta_max; 497 kstat_named_t arcstat_meta_min; 498 } arc_stats_t; 499 500 static arc_stats_t arc_stats = { 501 { "hits", KSTAT_DATA_UINT64 }, 502 { "misses", KSTAT_DATA_UINT64 }, 503 { "demand_data_hits", KSTAT_DATA_UINT64 }, 504 { "demand_data_misses", KSTAT_DATA_UINT64 }, 505 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 506 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 507 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 508 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 509 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 510 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 511 { "mru_hits", KSTAT_DATA_UINT64 }, 512 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 513 { "mfu_hits", KSTAT_DATA_UINT64 }, 514 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 515 { "deleted", KSTAT_DATA_UINT64 }, 516 { "mutex_miss", KSTAT_DATA_UINT64 }, 517 { "evict_skip", KSTAT_DATA_UINT64 }, 518 { "evict_not_enough", KSTAT_DATA_UINT64 }, 519 { "evict_l2_cached", KSTAT_DATA_UINT64 }, 520 { 
"evict_l2_eligible", KSTAT_DATA_UINT64 }, 521 { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 522 { "evict_l2_skip", KSTAT_DATA_UINT64 }, 523 { "hash_elements", KSTAT_DATA_UINT64 }, 524 { "hash_elements_max", KSTAT_DATA_UINT64 }, 525 { "hash_collisions", KSTAT_DATA_UINT64 }, 526 { "hash_chains", KSTAT_DATA_UINT64 }, 527 { "hash_chain_max", KSTAT_DATA_UINT64 }, 528 { "p", KSTAT_DATA_UINT64 }, 529 { "c", KSTAT_DATA_UINT64 }, 530 { "c_min", KSTAT_DATA_UINT64 }, 531 { "c_max", KSTAT_DATA_UINT64 }, 532 { "size", KSTAT_DATA_UINT64 }, 533 { "hdr_size", KSTAT_DATA_UINT64 }, 534 { "data_size", KSTAT_DATA_UINT64 }, 535 { "metadata_size", KSTAT_DATA_UINT64 }, 536 { "other_size", KSTAT_DATA_UINT64 }, 537 { "anon_size", KSTAT_DATA_UINT64 }, 538 { "anon_evictable_data", KSTAT_DATA_UINT64 }, 539 { "anon_evictable_metadata", KSTAT_DATA_UINT64 }, 540 { "mru_size", KSTAT_DATA_UINT64 }, 541 { "mru_evictable_data", KSTAT_DATA_UINT64 }, 542 { "mru_evictable_metadata", KSTAT_DATA_UINT64 }, 543 { "mru_ghost_size", KSTAT_DATA_UINT64 }, 544 { "mru_ghost_evictable_data", KSTAT_DATA_UINT64 }, 545 { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 546 { "mfu_size", KSTAT_DATA_UINT64 }, 547 { "mfu_evictable_data", KSTAT_DATA_UINT64 }, 548 { "mfu_evictable_metadata", KSTAT_DATA_UINT64 }, 549 { "mfu_ghost_size", KSTAT_DATA_UINT64 }, 550 { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 }, 551 { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 552 { "l2_hits", KSTAT_DATA_UINT64 }, 553 { "l2_misses", KSTAT_DATA_UINT64 }, 554 { "l2_feeds", KSTAT_DATA_UINT64 }, 555 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 556 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 557 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 558 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 559 { "l2_writes_done", KSTAT_DATA_UINT64 }, 560 { "l2_writes_error", KSTAT_DATA_UINT64 }, 561 { "l2_writes_lock_retry", KSTAT_DATA_UINT64 }, 562 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 563 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 564 { "l2_evict_l1cached", KSTAT_DATA_UINT64 }, 565 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 566 { "l2_cdata_free_on_write", KSTAT_DATA_UINT64 }, 567 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 568 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 569 { "l2_io_error", KSTAT_DATA_UINT64 }, 570 { "l2_size", KSTAT_DATA_UINT64 }, 571 { "l2_asize", KSTAT_DATA_UINT64 }, 572 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 573 { "l2_compress_successes", KSTAT_DATA_UINT64 }, 574 { "l2_compress_zeros", KSTAT_DATA_UINT64 }, 575 { "l2_compress_failures", KSTAT_DATA_UINT64 }, 576 { "memory_throttle_count", KSTAT_DATA_UINT64 }, 577 { "duplicate_buffers", KSTAT_DATA_UINT64 }, 578 { "duplicate_buffers_size", KSTAT_DATA_UINT64 }, 579 { "duplicate_reads", KSTAT_DATA_UINT64 }, 580 { "arc_meta_used", KSTAT_DATA_UINT64 }, 581 { "arc_meta_limit", KSTAT_DATA_UINT64 }, 582 { "arc_meta_max", KSTAT_DATA_UINT64 }, 583 { "arc_meta_min", KSTAT_DATA_UINT64 } 584 }; 585 586 #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 587 588 #define ARCSTAT_INCR(stat, val) \ 589 atomic_add_64(&arc_stats.stat.value.ui64, (val)) 590 591 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 592 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 593 594 #define ARCSTAT_MAX(stat, val) { \ 595 uint64_t m; \ 596 while ((val) > (m = arc_stats.stat.value.ui64) && \ 597 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 598 continue; \ 599 } 600 601 #define ARCSTAT_MAXSTAT(stat) \ 602 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 603 604 /* 605 * We define a macro to allow ARC hits/misses to be easily broken down 
by 606 * two separate conditions, giving a total of four different subtypes for 607 * each of hits and misses (so eight statistics total). 608 */ 609 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 610 if (cond1) { \ 611 if (cond2) { \ 612 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 613 } else { \ 614 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 615 } \ 616 } else { \ 617 if (cond2) { \ 618 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 619 } else { \ 620 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 621 } \ 622 } 623 624 kstat_t *arc_ksp; 625 static arc_state_t *arc_anon; 626 static arc_state_t *arc_mru; 627 static arc_state_t *arc_mru_ghost; 628 static arc_state_t *arc_mfu; 629 static arc_state_t *arc_mfu_ghost; 630 static arc_state_t *arc_l2c_only; 631 632 /* 633 * There are several ARC variables that are critical to export as kstats -- 634 * but we don't want to have to grovel around in the kstat whenever we wish to 635 * manipulate them. For these variables, we therefore define them to be in 636 * terms of the statistic variable. This assures that we are not introducing 637 * the possibility of inconsistency by having shadow copies of the variables, 638 * while still allowing the code to be readable. 639 */ 640 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 641 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 642 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 643 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 644 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 645 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */ 646 #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */ 647 #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */ 648 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */ 649 650 #define L2ARC_IS_VALID_COMPRESS(_c_) \ 651 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY) 652 653 static int arc_no_grow; /* Don't try to grow cache size */ 654 static uint64_t arc_tempreserve; 655 static uint64_t arc_loaned_bytes; 656 657 typedef struct arc_callback arc_callback_t; 658 659 struct arc_callback { 660 void *acb_private; 661 arc_done_func_t *acb_done; 662 arc_buf_t *acb_buf; 663 zio_t *acb_zio_dummy; 664 arc_callback_t *acb_next; 665 }; 666 667 typedef struct arc_write_callback arc_write_callback_t; 668 669 struct arc_write_callback { 670 void *awcb_private; 671 arc_done_func_t *awcb_ready; 672 arc_done_func_t *awcb_physdone; 673 arc_done_func_t *awcb_done; 674 arc_buf_t *awcb_buf; 675 }; 676 677 /* 678 * ARC buffers are separated into multiple structs as a memory saving measure: 679 * - Common fields struct, always defined, and embedded within it: 680 * - L2-only fields, always allocated but undefined when not in L2ARC 681 * - L1-only fields, only allocated when in L1ARC 682 * 683 * Buffer in L1 Buffer only in L2 684 * +------------------------+ +------------------------+ 685 * | arc_buf_hdr_t | | arc_buf_hdr_t | 686 * | | | | 687 * | | | | 688 * | | | | 689 * +------------------------+ +------------------------+ 690 * | l2arc_buf_hdr_t | | l2arc_buf_hdr_t | 691 * | (undefined if L1-only) | | | 692 * +------------------------+ +------------------------+ 693 * | l1arc_buf_hdr_t | 694 * | | 695 * | | 696 * | | 697 * | | 698 * +------------------------+ 699 * 700 * Because it's possible for the L2ARC to become extremely large, we can 
wind 701 * up eating a lot of memory in L2ARC buffer headers, so the size of a header 702 * is minimized by only allocating the fields necessary for an L1-cached buffer 703 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and 704 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple 705 * words in pointers. arc_hdr_realloc() is used to switch a header between 706 * these two allocation states. 707 */ 708 typedef struct l1arc_buf_hdr { 709 kmutex_t b_freeze_lock; 710 #ifdef ZFS_DEBUG 711 /* 712 * used for debugging wtih kmem_flags - by allocating and freeing 713 * b_thawed when the buffer is thawed, we get a record of the stack 714 * trace that thawed it. 715 */ 716 void *b_thawed; 717 #endif 718 719 arc_buf_t *b_buf; 720 uint32_t b_datacnt; 721 /* for waiting on writes to complete */ 722 kcondvar_t b_cv; 723 724 /* protected by arc state mutex */ 725 arc_state_t *b_state; 726 multilist_node_t b_arc_node; 727 728 /* updated atomically */ 729 clock_t b_arc_access; 730 731 /* self protecting */ 732 refcount_t b_refcnt; 733 734 arc_callback_t *b_acb; 735 /* temporary buffer holder for in-flight compressed data */ 736 void *b_tmp_cdata; 737 } l1arc_buf_hdr_t; 738 739 typedef struct l2arc_dev l2arc_dev_t; 740 741 typedef struct l2arc_buf_hdr { 742 /* protected by arc_buf_hdr mutex */ 743 l2arc_dev_t *b_dev; /* L2ARC device */ 744 uint64_t b_daddr; /* disk address, offset byte */ 745 /* real alloc'd buffer size depending on b_compress applied */ 746 int32_t b_asize; 747 uint8_t b_compress; 748 749 list_node_t b_l2node; 750 } l2arc_buf_hdr_t; 751 752 struct arc_buf_hdr { 753 /* protected by hash lock */ 754 dva_t b_dva; 755 uint64_t b_birth; 756 /* 757 * Even though this checksum is only set/verified when a buffer is in 758 * the L1 cache, it needs to be in the set of common fields because it 759 * must be preserved from the time before a buffer is written out to 760 * L2ARC until after it is read back in. 761 */ 762 zio_cksum_t *b_freeze_cksum; 763 764 arc_buf_hdr_t *b_hash_next; 765 arc_flags_t b_flags; 766 767 /* immutable */ 768 int32_t b_size; 769 uint64_t b_spa; 770 771 /* L2ARC fields. Undefined when not in L2ARC. */ 772 l2arc_buf_hdr_t b_l2hdr; 773 /* L1ARC fields. 
Undefined when in l2arc_only state */ 774 l1arc_buf_hdr_t b_l1hdr; 775 }; 776 777 static arc_buf_t *arc_eviction_list; 778 static arc_buf_hdr_t arc_eviction_hdr; 779 780 #define GHOST_STATE(state) \ 781 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 782 (state) == arc_l2c_only) 783 784 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE) 785 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) 786 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR) 787 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH) 788 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FLAG_FREED_IN_READ) 789 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE) 790 791 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE) 792 #define HDR_L2COMPRESS(hdr) ((hdr)->b_flags & ARC_FLAG_L2COMPRESS) 793 #define HDR_L2_READING(hdr) \ 794 (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \ 795 ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)) 796 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING) 797 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED) 798 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD) 799 800 #define HDR_ISTYPE_METADATA(hdr) \ 801 ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA) 802 #define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr)) 803 804 #define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR) 805 #define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR) 806 807 /* 808 * Other sizes 809 */ 810 811 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 812 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr)) 813 814 /* 815 * Hash table routines 816 */ 817 818 #define HT_LOCK_PAD 64 819 820 struct ht_lock { 821 kmutex_t ht_lock; 822 #ifdef _KERNEL 823 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 824 #endif 825 }; 826 827 #define BUF_LOCKS 256 828 typedef struct buf_hash_table { 829 uint64_t ht_mask; 830 arc_buf_hdr_t **ht_table; 831 struct ht_lock ht_locks[BUF_LOCKS]; 832 } buf_hash_table_t; 833 834 static buf_hash_table_t buf_hash_table; 835 836 #define BUF_HASH_INDEX(spa, dva, birth) \ 837 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 838 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 839 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 840 #define HDR_LOCK(hdr) \ 841 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 842 843 uint64_t zfs_crc64_table[256]; 844 845 /* 846 * Level 2 ARC 847 */ 848 849 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 850 #define L2ARC_HEADROOM 2 /* num of writes */ 851 /* 852 * If we discover during ARC scan any buffers to be compressed, we boost 853 * our headroom for the next scanning cycle by this percentage multiple. 854 */ 855 #define L2ARC_HEADROOM_BOOST 200 856 #define L2ARC_FEED_SECS 1 /* caching interval secs */ 857 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 858 859 /* 860 * Used to distinguish headers that are being process by 861 * l2arc_write_buffers(), but have yet to be assigned to a l2arc disk 862 * address. This can happen when the header is added to the l2arc's list 863 * of buffers to write in the first stage of l2arc_write_buffers(), but 864 * has not yet been written out which happens in the second stage of 865 * l2arc_write_buffers(). 
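 *
 * As a minimal sketch (illustrative only, not the actual eviction code),
 * a walker of the device's buflist could recognize such in-flight headers
 * by the placeholder address and leave them alone:
 *
 *	if (HDR_HAS_L2HDR(hdr) && hdr->b_l2hdr.b_daddr == L2ARC_ADDR_UNSET)
 *		continue;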
866 */ 867 #define L2ARC_ADDR_UNSET ((uint64_t)(-1)) 868 869 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 870 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 871 872 /* L2ARC Performance Tunables */ 873 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 874 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 875 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 876 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; 877 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 878 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 879 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 880 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 881 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 882 883 /* 884 * L2ARC Internals 885 */ 886 struct l2arc_dev { 887 vdev_t *l2ad_vdev; /* vdev */ 888 spa_t *l2ad_spa; /* spa */ 889 uint64_t l2ad_hand; /* next write location */ 890 uint64_t l2ad_start; /* first addr on device */ 891 uint64_t l2ad_end; /* last addr on device */ 892 boolean_t l2ad_first; /* first sweep through */ 893 boolean_t l2ad_writing; /* currently writing */ 894 kmutex_t l2ad_mtx; /* lock for buffer list */ 895 list_t l2ad_buflist; /* buffer list */ 896 list_node_t l2ad_node; /* device list node */ 897 refcount_t l2ad_alloc; /* allocated bytes */ 898 }; 899 900 static list_t L2ARC_dev_list; /* device list */ 901 static list_t *l2arc_dev_list; /* device list pointer */ 902 static kmutex_t l2arc_dev_mtx; /* device list mutex */ 903 static l2arc_dev_t *l2arc_dev_last; /* last device used */ 904 static list_t L2ARC_free_on_write; /* free after write buf list */ 905 static list_t *l2arc_free_on_write; /* free after write list ptr */ 906 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 907 static uint64_t l2arc_ndev; /* number of devices */ 908 909 typedef struct l2arc_read_callback { 910 arc_buf_t *l2rcb_buf; /* read buffer */ 911 spa_t *l2rcb_spa; /* spa */ 912 blkptr_t l2rcb_bp; /* original blkptr */ 913 zbookmark_phys_t l2rcb_zb; /* original bookmark */ 914 int l2rcb_flags; /* original flags */ 915 enum zio_compress l2rcb_compress; /* applied compress */ 916 } l2arc_read_callback_t; 917 918 typedef struct l2arc_write_callback { 919 l2arc_dev_t *l2wcb_dev; /* device info */ 920 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 921 } l2arc_write_callback_t; 922 923 typedef struct l2arc_data_free { 924 /* protected by l2arc_free_on_write_mtx */ 925 void *l2df_data; 926 size_t l2df_size; 927 void (*l2df_func)(void *, size_t); 928 list_node_t l2df_list_node; 929 } l2arc_data_free_t; 930 931 static kmutex_t l2arc_feed_thr_lock; 932 static kcondvar_t l2arc_feed_thr_cv; 933 static uint8_t l2arc_thread_exit; 934 935 static void arc_get_data_buf(arc_buf_t *); 936 static void arc_access(arc_buf_hdr_t *, kmutex_t *); 937 static boolean_t arc_is_overflowing(); 938 static void arc_buf_watch(arc_buf_t *); 939 940 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *); 941 static uint32_t arc_bufc_to_flags(arc_buf_contents_t); 942 943 static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *); 944 static void l2arc_read_done(zio_t *); 945 946 static boolean_t l2arc_compress_buf(arc_buf_hdr_t *); 947 static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress); 948 static void l2arc_release_cdata_buf(arc_buf_hdr_t *); 949 950 static uint64_t 951 buf_hash(uint64_t spa, const dva_t *dva, uint64_t 
birth) 952 { 953 uint8_t *vdva = (uint8_t *)dva; 954 uint64_t crc = -1ULL; 955 int i; 956 957 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 958 959 for (i = 0; i < sizeof (dva_t); i++) 960 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 961 962 crc ^= (spa>>8) ^ birth; 963 964 return (crc); 965 } 966 967 #define BUF_EMPTY(buf) \ 968 ((buf)->b_dva.dva_word[0] == 0 && \ 969 (buf)->b_dva.dva_word[1] == 0) 970 971 #define BUF_EQUAL(spa, dva, birth, buf) \ 972 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 973 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 974 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 975 976 static void 977 buf_discard_identity(arc_buf_hdr_t *hdr) 978 { 979 hdr->b_dva.dva_word[0] = 0; 980 hdr->b_dva.dva_word[1] = 0; 981 hdr->b_birth = 0; 982 } 983 984 static arc_buf_hdr_t * 985 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp) 986 { 987 const dva_t *dva = BP_IDENTITY(bp); 988 uint64_t birth = BP_PHYSICAL_BIRTH(bp); 989 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 990 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 991 arc_buf_hdr_t *hdr; 992 993 mutex_enter(hash_lock); 994 for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL; 995 hdr = hdr->b_hash_next) { 996 if (BUF_EQUAL(spa, dva, birth, hdr)) { 997 *lockp = hash_lock; 998 return (hdr); 999 } 1000 } 1001 mutex_exit(hash_lock); 1002 *lockp = NULL; 1003 return (NULL); 1004 } 1005 1006 /* 1007 * Insert an entry into the hash table. If there is already an element 1008 * equal to elem in the hash table, then the already existing element 1009 * will be returned and the new element will not be inserted. 1010 * Otherwise returns NULL. 1011 * If lockp == NULL, the caller is assumed to already hold the hash lock. 1012 */ 1013 static arc_buf_hdr_t * 1014 buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp) 1015 { 1016 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1017 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 1018 arc_buf_hdr_t *fhdr; 1019 uint32_t i; 1020 1021 ASSERT(!DVA_IS_EMPTY(&hdr->b_dva)); 1022 ASSERT(hdr->b_birth != 0); 1023 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1024 1025 if (lockp != NULL) { 1026 *lockp = hash_lock; 1027 mutex_enter(hash_lock); 1028 } else { 1029 ASSERT(MUTEX_HELD(hash_lock)); 1030 } 1031 1032 for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL; 1033 fhdr = fhdr->b_hash_next, i++) { 1034 if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr)) 1035 return (fhdr); 1036 } 1037 1038 hdr->b_hash_next = buf_hash_table.ht_table[idx]; 1039 buf_hash_table.ht_table[idx] = hdr; 1040 hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE; 1041 1042 /* collect some hash table performance data */ 1043 if (i > 0) { 1044 ARCSTAT_BUMP(arcstat_hash_collisions); 1045 if (i == 1) 1046 ARCSTAT_BUMP(arcstat_hash_chains); 1047 1048 ARCSTAT_MAX(arcstat_hash_chain_max, i); 1049 } 1050 1051 ARCSTAT_BUMP(arcstat_hash_elements); 1052 ARCSTAT_MAXSTAT(arcstat_hash_elements); 1053 1054 return (NULL); 1055 } 1056 1057 static void 1058 buf_hash_remove(arc_buf_hdr_t *hdr) 1059 { 1060 arc_buf_hdr_t *fhdr, **hdrp; 1061 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1062 1063 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 1064 ASSERT(HDR_IN_HASH_TABLE(hdr)); 1065 1066 hdrp = &buf_hash_table.ht_table[idx]; 1067 while ((fhdr = *hdrp) != hdr) { 1068 ASSERT(fhdr != NULL); 1069 hdrp = &fhdr->b_hash_next; 1070 } 1071 *hdrp = hdr->b_hash_next; 1072 hdr->b_hash_next = NULL; 1073 hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE; 1074 1075 /* collect some hash table performance data 
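 * (hash_elements tracks the current number of headers in the table;
 * hash_chains is only decremented below when the bucket drops back to
 * a single element, mirroring the increment in buf_hash_insert())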
*/ 1076 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 1077 1078 if (buf_hash_table.ht_table[idx] && 1079 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 1080 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 1081 } 1082 1083 /* 1084 * Global data structures and functions for the buf kmem cache. 1085 */ 1086 static kmem_cache_t *hdr_full_cache; 1087 static kmem_cache_t *hdr_l2only_cache; 1088 static kmem_cache_t *buf_cache; 1089 1090 static void 1091 buf_fini(void) 1092 { 1093 int i; 1094 1095 kmem_free(buf_hash_table.ht_table, 1096 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 1097 for (i = 0; i < BUF_LOCKS; i++) 1098 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 1099 kmem_cache_destroy(hdr_full_cache); 1100 kmem_cache_destroy(hdr_l2only_cache); 1101 kmem_cache_destroy(buf_cache); 1102 } 1103 1104 /* 1105 * Constructor callback - called when the cache is empty 1106 * and a new buf is requested. 1107 */ 1108 /* ARGSUSED */ 1109 static int 1110 hdr_full_cons(void *vbuf, void *unused, int kmflag) 1111 { 1112 arc_buf_hdr_t *hdr = vbuf; 1113 1114 bzero(hdr, HDR_FULL_SIZE); 1115 cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL); 1116 refcount_create(&hdr->b_l1hdr.b_refcnt); 1117 mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 1118 multilist_link_init(&hdr->b_l1hdr.b_arc_node); 1119 arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1120 1121 return (0); 1122 } 1123 1124 /* ARGSUSED */ 1125 static int 1126 hdr_l2only_cons(void *vbuf, void *unused, int kmflag) 1127 { 1128 arc_buf_hdr_t *hdr = vbuf; 1129 1130 bzero(hdr, HDR_L2ONLY_SIZE); 1131 arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1132 1133 return (0); 1134 } 1135 1136 /* ARGSUSED */ 1137 static int 1138 buf_cons(void *vbuf, void *unused, int kmflag) 1139 { 1140 arc_buf_t *buf = vbuf; 1141 1142 bzero(buf, sizeof (arc_buf_t)); 1143 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 1144 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1145 1146 return (0); 1147 } 1148 1149 /* 1150 * Destructor callback - called when a cached buf is 1151 * no longer required. 1152 */ 1153 /* ARGSUSED */ 1154 static void 1155 hdr_full_dest(void *vbuf, void *unused) 1156 { 1157 arc_buf_hdr_t *hdr = vbuf; 1158 1159 ASSERT(BUF_EMPTY(hdr)); 1160 cv_destroy(&hdr->b_l1hdr.b_cv); 1161 refcount_destroy(&hdr->b_l1hdr.b_refcnt); 1162 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock); 1163 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 1164 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1165 } 1166 1167 /* ARGSUSED */ 1168 static void 1169 hdr_l2only_dest(void *vbuf, void *unused) 1170 { 1171 arc_buf_hdr_t *hdr = vbuf; 1172 1173 ASSERT(BUF_EMPTY(hdr)); 1174 arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1175 } 1176 1177 /* ARGSUSED */ 1178 static void 1179 buf_dest(void *vbuf, void *unused) 1180 { 1181 arc_buf_t *buf = vbuf; 1182 1183 mutex_destroy(&buf->b_evict_lock); 1184 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1185 } 1186 1187 /* 1188 * Reclaim callback -- invoked when memory is low. 1189 */ 1190 /* ARGSUSED */ 1191 static void 1192 hdr_recl(void *unused) 1193 { 1194 dprintf("hdr_recl called\n"); 1195 /* 1196 * umem calls the reclaim func when we destroy the buf cache, 1197 * which is after we do arc_fini(). 
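	 * The arc_dead check below guards against that case: once the ARC
	 * has been torn down, arc_reclaim_thread_cv is no longer safe to
	 * signal, so the late reclaim callback simply becomes a no-op.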
1198 */ 1199 if (!arc_dead) 1200 cv_signal(&arc_reclaim_thread_cv); 1201 } 1202 1203 static void 1204 buf_init(void) 1205 { 1206 uint64_t *ct; 1207 uint64_t hsize = 1ULL << 12; 1208 int i, j; 1209 1210 /* 1211 * The hash table is big enough to fill all of physical memory 1212 * with an average block size of zfs_arc_average_blocksize (default 8K). 1213 * By default, the table will take up 1214 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 1215 */ 1216 while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE) 1217 hsize <<= 1; 1218 retry: 1219 buf_hash_table.ht_mask = hsize - 1; 1220 buf_hash_table.ht_table = 1221 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 1222 if (buf_hash_table.ht_table == NULL) { 1223 ASSERT(hsize > (1ULL << 8)); 1224 hsize >>= 1; 1225 goto retry; 1226 } 1227 1228 hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE, 1229 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0); 1230 hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only", 1231 HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl, 1232 NULL, NULL, 0); 1233 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 1234 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1235 1236 for (i = 0; i < 256; i++) 1237 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1238 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1239 1240 for (i = 0; i < BUF_LOCKS; i++) { 1241 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1242 NULL, MUTEX_DEFAULT, NULL); 1243 } 1244 } 1245 1246 /* 1247 * Transition between the two allocation states for the arc_buf_hdr struct. 1248 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without 1249 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller 1250 * version is used when a cache buffer is only in the L2ARC in order to reduce 1251 * memory usage. 1252 */ 1253 static arc_buf_hdr_t * 1254 arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new) 1255 { 1256 ASSERT(HDR_HAS_L2HDR(hdr)); 1257 1258 arc_buf_hdr_t *nhdr; 1259 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 1260 1261 ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) || 1262 (old == hdr_l2only_cache && new == hdr_full_cache)); 1263 1264 nhdr = kmem_cache_alloc(new, KM_PUSHPAGE); 1265 1266 ASSERT(MUTEX_HELD(HDR_LOCK(hdr))); 1267 buf_hash_remove(hdr); 1268 1269 bcopy(hdr, nhdr, HDR_L2ONLY_SIZE); 1270 1271 if (new == hdr_full_cache) { 1272 nhdr->b_flags |= ARC_FLAG_HAS_L1HDR; 1273 /* 1274 * arc_access and arc_change_state need to be aware that a 1275 * header has just come out of L2ARC, so we set its state to 1276 * l2c_only even though it's about to change. 1277 */ 1278 nhdr->b_l1hdr.b_state = arc_l2c_only; 1279 1280 /* Verify previous threads set to NULL before freeing */ 1281 ASSERT3P(nhdr->b_l1hdr.b_tmp_cdata, ==, NULL); 1282 } else { 1283 ASSERT(hdr->b_l1hdr.b_buf == NULL); 1284 ASSERT0(hdr->b_l1hdr.b_datacnt); 1285 1286 /* 1287 * If we've reached here, We must have been called from 1288 * arc_evict_hdr(), as such we should have already been 1289 * removed from any ghost list we were previously on 1290 * (which protects us from racing with arc_evict_state), 1291 * thus no locking is needed during this check. 1292 */ 1293 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 1294 1295 /* 1296 * A buffer must not be moved into the arc_l2c_only 1297 * state if it's not finished being written out to the 1298 * l2arc device. 
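		 * (HDR_L2_WRITING() below reports exactly that in-flight
		 * condition, and is verified to be clear before the L1
		 * portion of the header is dropped.)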
Otherwise, the b_l1hdr.b_tmp_cdata field 1299 * might try to be accessed, even though it was removed. 1300 */ 1301 VERIFY(!HDR_L2_WRITING(hdr)); 1302 VERIFY3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL); 1303 1304 nhdr->b_flags &= ~ARC_FLAG_HAS_L1HDR; 1305 } 1306 /* 1307 * The header has been reallocated so we need to re-insert it into any 1308 * lists it was on. 1309 */ 1310 (void) buf_hash_insert(nhdr, NULL); 1311 1312 ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node)); 1313 1314 mutex_enter(&dev->l2ad_mtx); 1315 1316 /* 1317 * We must place the realloc'ed header back into the list at 1318 * the same spot. Otherwise, if it's placed earlier in the list, 1319 * l2arc_write_buffers() could find it during the function's 1320 * write phase, and try to write it out to the l2arc. 1321 */ 1322 list_insert_after(&dev->l2ad_buflist, hdr, nhdr); 1323 list_remove(&dev->l2ad_buflist, hdr); 1324 1325 mutex_exit(&dev->l2ad_mtx); 1326 1327 /* 1328 * Since we're using the pointer address as the tag when 1329 * incrementing and decrementing the l2ad_alloc refcount, we 1330 * must remove the old pointer (that we're about to destroy) and 1331 * add the new pointer to the refcount. Otherwise we'd remove 1332 * the wrong pointer address when calling arc_hdr_destroy() later. 1333 */ 1334 1335 (void) refcount_remove_many(&dev->l2ad_alloc, 1336 hdr->b_l2hdr.b_asize, hdr); 1337 1338 (void) refcount_add_many(&dev->l2ad_alloc, 1339 nhdr->b_l2hdr.b_asize, nhdr); 1340 1341 buf_discard_identity(hdr); 1342 hdr->b_freeze_cksum = NULL; 1343 kmem_cache_free(old, hdr); 1344 1345 return (nhdr); 1346 } 1347 1348 1349 #define ARC_MINTIME (hz>>4) /* 62 ms */ 1350 1351 static void 1352 arc_cksum_verify(arc_buf_t *buf) 1353 { 1354 zio_cksum_t zc; 1355 1356 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1357 return; 1358 1359 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1360 if (buf->b_hdr->b_freeze_cksum == NULL || HDR_IO_ERROR(buf->b_hdr)) { 1361 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1362 return; 1363 } 1364 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1365 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 1366 panic("buffer modified while frozen!"); 1367 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1368 } 1369 1370 static int 1371 arc_cksum_equal(arc_buf_t *buf) 1372 { 1373 zio_cksum_t zc; 1374 int equal; 1375 1376 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1377 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1378 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 1379 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1380 1381 return (equal); 1382 } 1383 1384 static void 1385 arc_cksum_compute(arc_buf_t *buf, boolean_t force) 1386 { 1387 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 1388 return; 1389 1390 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1391 if (buf->b_hdr->b_freeze_cksum != NULL) { 1392 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1393 return; 1394 } 1395 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 1396 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 1397 buf->b_hdr->b_freeze_cksum); 1398 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1399 arc_buf_watch(buf); 1400 } 1401 1402 #ifndef _KERNEL 1403 typedef struct procctl { 1404 long cmd; 1405 prwatch_t prwatch; 1406 } procctl_t; 1407 #endif 1408 1409 /* ARGSUSED */ 1410 static void 1411 arc_buf_unwatch(arc_buf_t *buf) 1412 { 1413 #ifndef _KERNEL 1414 if (arc_watch) { 1415 int result; 1416 procctl_t ctl; 1417 ctl.cmd = PCWATCH; 1418 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1419 
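		/*
		 * Clearing pr_wflags (no watch flags) asks /proc's PCWATCH
		 * operation to cancel the watched area at pr_vaddr rather
		 * than establish a new one; contrast arc_buf_watch(), which
		 * sets pr_size to the buffer size and pr_wflags to WA_WRITE.
		 */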
ctl.prwatch.pr_size = 0; 1420 ctl.prwatch.pr_wflags = 0; 1421 result = write(arc_procfd, &ctl, sizeof (ctl)); 1422 ASSERT3U(result, ==, sizeof (ctl)); 1423 } 1424 #endif 1425 } 1426 1427 /* ARGSUSED */ 1428 static void 1429 arc_buf_watch(arc_buf_t *buf) 1430 { 1431 #ifndef _KERNEL 1432 if (arc_watch) { 1433 int result; 1434 procctl_t ctl; 1435 ctl.cmd = PCWATCH; 1436 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 1437 ctl.prwatch.pr_size = buf->b_hdr->b_size; 1438 ctl.prwatch.pr_wflags = WA_WRITE; 1439 result = write(arc_procfd, &ctl, sizeof (ctl)); 1440 ASSERT3U(result, ==, sizeof (ctl)); 1441 } 1442 #endif 1443 } 1444 1445 static arc_buf_contents_t 1446 arc_buf_type(arc_buf_hdr_t *hdr) 1447 { 1448 if (HDR_ISTYPE_METADATA(hdr)) { 1449 return (ARC_BUFC_METADATA); 1450 } else { 1451 return (ARC_BUFC_DATA); 1452 } 1453 } 1454 1455 static uint32_t 1456 arc_bufc_to_flags(arc_buf_contents_t type) 1457 { 1458 switch (type) { 1459 case ARC_BUFC_DATA: 1460 /* metadata field is 0 if buffer contains normal data */ 1461 return (0); 1462 case ARC_BUFC_METADATA: 1463 return (ARC_FLAG_BUFC_METADATA); 1464 default: 1465 break; 1466 } 1467 panic("undefined ARC buffer type!"); 1468 return ((uint32_t)-1); 1469 } 1470 1471 void 1472 arc_buf_thaw(arc_buf_t *buf) 1473 { 1474 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1475 if (buf->b_hdr->b_l1hdr.b_state != arc_anon) 1476 panic("modifying non-anon buffer!"); 1477 if (HDR_IO_IN_PROGRESS(buf->b_hdr)) 1478 panic("modifying buffer while i/o in progress!"); 1479 arc_cksum_verify(buf); 1480 } 1481 1482 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1483 if (buf->b_hdr->b_freeze_cksum != NULL) { 1484 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1485 buf->b_hdr->b_freeze_cksum = NULL; 1486 } 1487 1488 #ifdef ZFS_DEBUG 1489 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1490 if (buf->b_hdr->b_l1hdr.b_thawed != NULL) 1491 kmem_free(buf->b_hdr->b_l1hdr.b_thawed, 1); 1492 buf->b_hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP); 1493 } 1494 #endif 1495 1496 mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); 1497 1498 arc_buf_unwatch(buf); 1499 } 1500 1501 void 1502 arc_buf_freeze(arc_buf_t *buf) 1503 { 1504 kmutex_t *hash_lock; 1505 1506 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1507 return; 1508 1509 hash_lock = HDR_LOCK(buf->b_hdr); 1510 mutex_enter(hash_lock); 1511 1512 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 1513 buf->b_hdr->b_l1hdr.b_state == arc_anon); 1514 arc_cksum_compute(buf, B_FALSE); 1515 mutex_exit(hash_lock); 1516 1517 } 1518 1519 static void 1520 add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 1521 { 1522 ASSERT(HDR_HAS_L1HDR(hdr)); 1523 ASSERT(MUTEX_HELD(hash_lock)); 1524 arc_state_t *state = hdr->b_l1hdr.b_state; 1525 1526 if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) && 1527 (state != arc_anon)) { 1528 /* We don't use the L2-only state list. 
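		 * Taking the first reference also makes the buffer
		 * un-evictable, so below we pull the header off its state's
		 * multilist and subtract its bytes from arcs_lsize, which
		 * only counts evictable data: the full b_size * b_datacnt
		 * for a regular state, or just b_size for a ghost state,
		 * where no data buffers are attached.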
*/ 1529 if (state != arc_l2c_only) { 1530 arc_buf_contents_t type = arc_buf_type(hdr); 1531 uint64_t delta = hdr->b_size * hdr->b_l1hdr.b_datacnt; 1532 multilist_t *list = &state->arcs_list[type]; 1533 uint64_t *size = &state->arcs_lsize[type]; 1534 1535 multilist_remove(list, hdr); 1536 1537 if (GHOST_STATE(state)) { 1538 ASSERT0(hdr->b_l1hdr.b_datacnt); 1539 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 1540 delta = hdr->b_size; 1541 } 1542 ASSERT(delta > 0); 1543 ASSERT3U(*size, >=, delta); 1544 atomic_add_64(size, -delta); 1545 } 1546 /* remove the prefetch flag if we get a reference */ 1547 hdr->b_flags &= ~ARC_FLAG_PREFETCH; 1548 } 1549 } 1550 1551 static int 1552 remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 1553 { 1554 int cnt; 1555 arc_state_t *state = hdr->b_l1hdr.b_state; 1556 1557 ASSERT(HDR_HAS_L1HDR(hdr)); 1558 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 1559 ASSERT(!GHOST_STATE(state)); 1560 1561 /* 1562 * arc_l2c_only counts as a ghost state so we don't need to explicitly 1563 * check to prevent usage of the arc_l2c_only list. 1564 */ 1565 if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) && 1566 (state != arc_anon)) { 1567 arc_buf_contents_t type = arc_buf_type(hdr); 1568 multilist_t *list = &state->arcs_list[type]; 1569 uint64_t *size = &state->arcs_lsize[type]; 1570 1571 multilist_insert(list, hdr); 1572 1573 ASSERT(hdr->b_l1hdr.b_datacnt > 0); 1574 atomic_add_64(size, hdr->b_size * 1575 hdr->b_l1hdr.b_datacnt); 1576 } 1577 return (cnt); 1578 } 1579 1580 /* 1581 * Move the supplied buffer to the indicated state. The hash lock 1582 * for the buffer must be held by the caller. 1583 */ 1584 static void 1585 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, 1586 kmutex_t *hash_lock) 1587 { 1588 arc_state_t *old_state; 1589 int64_t refcnt; 1590 uint32_t datacnt; 1591 uint64_t from_delta, to_delta; 1592 arc_buf_contents_t buftype = arc_buf_type(hdr); 1593 1594 /* 1595 * We almost always have an L1 hdr here, since we call arc_hdr_realloc() 1596 * in arc_read() when bringing a buffer out of the L2ARC. However, the 1597 * L1 hdr doesn't always exist when we change state to arc_anon before 1598 * destroying a header, in which case reallocating to add the L1 hdr is 1599 * pointless. 1600 */ 1601 if (HDR_HAS_L1HDR(hdr)) { 1602 old_state = hdr->b_l1hdr.b_state; 1603 refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt); 1604 datacnt = hdr->b_l1hdr.b_datacnt; 1605 } else { 1606 old_state = arc_l2c_only; 1607 refcnt = 0; 1608 datacnt = 0; 1609 } 1610 1611 ASSERT(MUTEX_HELD(hash_lock)); 1612 ASSERT3P(new_state, !=, old_state); 1613 ASSERT(refcnt == 0 || datacnt > 0); 1614 ASSERT(!GHOST_STATE(new_state) || datacnt == 0); 1615 ASSERT(old_state != arc_anon || datacnt <= 1); 1616 1617 from_delta = to_delta = datacnt * hdr->b_size; 1618 1619 /* 1620 * If this buffer is evictable, transfer it from the 1621 * old state list to the new state list. 1622 */ 1623 if (refcnt == 0) { 1624 if (old_state != arc_anon && old_state != arc_l2c_only) { 1625 uint64_t *size = &old_state->arcs_lsize[buftype]; 1626 1627 ASSERT(HDR_HAS_L1HDR(hdr)); 1628 multilist_remove(&old_state->arcs_list[buftype], hdr); 1629 1630 /* 1631 * If prefetching out of the ghost cache, 1632 * we will have a non-zero datacnt. 
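			 * In that case from_delta keeps its
			 * datacnt * b_size value; the ghost branch below
			 * only substitutes a single b_size when datacnt is
			 * zero, matching the "ghost size" that was charged
			 * when the header was placed on the ghost list.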
1633 */ 1634 if (GHOST_STATE(old_state) && datacnt == 0) { 1635 /* ghost elements have a ghost size */ 1636 ASSERT(hdr->b_l1hdr.b_buf == NULL); 1637 from_delta = hdr->b_size; 1638 } 1639 ASSERT3U(*size, >=, from_delta); 1640 atomic_add_64(size, -from_delta); 1641 } 1642 if (new_state != arc_anon && new_state != arc_l2c_only) { 1643 uint64_t *size = &new_state->arcs_lsize[buftype]; 1644 1645 /* 1646 * An L1 header always exists here, since if we're 1647 * moving to some L1-cached state (i.e. not l2c_only or 1648 * anonymous), we realloc the header to add an L1hdr 1649 * beforehand. 1650 */ 1651 ASSERT(HDR_HAS_L1HDR(hdr)); 1652 multilist_insert(&new_state->arcs_list[buftype], hdr); 1653 1654 /* ghost elements have a ghost size */ 1655 if (GHOST_STATE(new_state)) { 1656 ASSERT0(datacnt); 1657 ASSERT(hdr->b_l1hdr.b_buf == NULL); 1658 to_delta = hdr->b_size; 1659 } 1660 atomic_add_64(size, to_delta); 1661 } 1662 } 1663 1664 ASSERT(!BUF_EMPTY(hdr)); 1665 if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) 1666 buf_hash_remove(hdr); 1667 1668 /* adjust state sizes (ignore arc_l2c_only) */ 1669 1670 if (to_delta && new_state != arc_l2c_only) { 1671 ASSERT(HDR_HAS_L1HDR(hdr)); 1672 if (GHOST_STATE(new_state)) { 1673 ASSERT0(datacnt); 1674 1675 /* 1676 * We moving a header to a ghost state, we first 1677 * remove all arc buffers. Thus, we'll have a 1678 * datacnt of zero, and no arc buffer to use for 1679 * the reference. As a result, we use the arc 1680 * header pointer for the reference. 1681 */ 1682 (void) refcount_add_many(&new_state->arcs_size, 1683 hdr->b_size, hdr); 1684 } else { 1685 ASSERT3U(datacnt, !=, 0); 1686 1687 /* 1688 * Each individual buffer holds a unique reference, 1689 * thus we must remove each of these references one 1690 * at a time. 1691 */ 1692 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 1693 buf = buf->b_next) { 1694 (void) refcount_add_many(&new_state->arcs_size, 1695 hdr->b_size, buf); 1696 } 1697 } 1698 } 1699 1700 if (from_delta && old_state != arc_l2c_only) { 1701 ASSERT(HDR_HAS_L1HDR(hdr)); 1702 if (GHOST_STATE(old_state)) { 1703 /* 1704 * When moving a header off of a ghost state, 1705 * there's the possibility for datacnt to be 1706 * non-zero. This is because we first add the 1707 * arc buffer to the header prior to changing 1708 * the header's state. Since we used the header 1709 * for the reference when putting the header on 1710 * the ghost state, we must balance that and use 1711 * the header when removing off the ghost state 1712 * (even though datacnt is non zero). 1713 */ 1714 1715 IMPLY(datacnt == 0, new_state == arc_anon || 1716 new_state == arc_l2c_only); 1717 1718 (void) refcount_remove_many(&old_state->arcs_size, 1719 hdr->b_size, hdr); 1720 } else { 1721 ASSERT3P(datacnt, !=, 0); 1722 1723 /* 1724 * Each individual buffer holds a unique reference, 1725 * thus we must remove each of these references one 1726 * at a time. 1727 */ 1728 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 1729 buf = buf->b_next) { 1730 (void) refcount_remove_many( 1731 &old_state->arcs_size, hdr->b_size, buf); 1732 } 1733 } 1734 } 1735 1736 if (HDR_HAS_L1HDR(hdr)) 1737 hdr->b_l1hdr.b_state = new_state; 1738 1739 /* 1740 * L2 headers should never be on the L2 state list since they don't 1741 * have L1 headers allocated. 
1742 */ 1743 ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) && 1744 multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA])); 1745 } 1746 1747 void 1748 arc_space_consume(uint64_t space, arc_space_type_t type) 1749 { 1750 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1751 1752 switch (type) { 1753 case ARC_SPACE_DATA: 1754 ARCSTAT_INCR(arcstat_data_size, space); 1755 break; 1756 case ARC_SPACE_META: 1757 ARCSTAT_INCR(arcstat_metadata_size, space); 1758 break; 1759 case ARC_SPACE_OTHER: 1760 ARCSTAT_INCR(arcstat_other_size, space); 1761 break; 1762 case ARC_SPACE_HDRS: 1763 ARCSTAT_INCR(arcstat_hdr_size, space); 1764 break; 1765 case ARC_SPACE_L2HDRS: 1766 ARCSTAT_INCR(arcstat_l2_hdr_size, space); 1767 break; 1768 } 1769 1770 if (type != ARC_SPACE_DATA) 1771 ARCSTAT_INCR(arcstat_meta_used, space); 1772 1773 atomic_add_64(&arc_size, space); 1774 } 1775 1776 void 1777 arc_space_return(uint64_t space, arc_space_type_t type) 1778 { 1779 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1780 1781 switch (type) { 1782 case ARC_SPACE_DATA: 1783 ARCSTAT_INCR(arcstat_data_size, -space); 1784 break; 1785 case ARC_SPACE_META: 1786 ARCSTAT_INCR(arcstat_metadata_size, -space); 1787 break; 1788 case ARC_SPACE_OTHER: 1789 ARCSTAT_INCR(arcstat_other_size, -space); 1790 break; 1791 case ARC_SPACE_HDRS: 1792 ARCSTAT_INCR(arcstat_hdr_size, -space); 1793 break; 1794 case ARC_SPACE_L2HDRS: 1795 ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 1796 break; 1797 } 1798 1799 if (type != ARC_SPACE_DATA) { 1800 ASSERT(arc_meta_used >= space); 1801 if (arc_meta_max < arc_meta_used) 1802 arc_meta_max = arc_meta_used; 1803 ARCSTAT_INCR(arcstat_meta_used, -space); 1804 } 1805 1806 ASSERT(arc_size >= space); 1807 atomic_add_64(&arc_size, -space); 1808 } 1809 1810 arc_buf_t * 1811 arc_buf_alloc(spa_t *spa, int32_t size, void *tag, arc_buf_contents_t type) 1812 { 1813 arc_buf_hdr_t *hdr; 1814 arc_buf_t *buf; 1815 1816 ASSERT3U(size, >, 0); 1817 hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); 1818 ASSERT(BUF_EMPTY(hdr)); 1819 ASSERT3P(hdr->b_freeze_cksum, ==, NULL); 1820 hdr->b_size = size; 1821 hdr->b_spa = spa_load_guid(spa); 1822 1823 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1824 buf->b_hdr = hdr; 1825 buf->b_data = NULL; 1826 buf->b_efunc = NULL; 1827 buf->b_private = NULL; 1828 buf->b_next = NULL; 1829 1830 hdr->b_flags = arc_bufc_to_flags(type); 1831 hdr->b_flags |= ARC_FLAG_HAS_L1HDR; 1832 1833 hdr->b_l1hdr.b_buf = buf; 1834 hdr->b_l1hdr.b_state = arc_anon; 1835 hdr->b_l1hdr.b_arc_access = 0; 1836 hdr->b_l1hdr.b_datacnt = 1; 1837 hdr->b_l1hdr.b_tmp_cdata = NULL; 1838 1839 arc_get_data_buf(buf); 1840 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 1841 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag); 1842 1843 return (buf); 1844 } 1845 1846 static char *arc_onloan_tag = "onloan"; 1847 1848 /* 1849 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 1850 * flight data by arc_tempreserve_space() until they are "returned". Loaned 1851 * buffers must be returned to the arc before they can be used by the DMU or 1852 * freed. 1853 */ 1854 arc_buf_t * 1855 arc_loan_buf(spa_t *spa, int size) 1856 { 1857 arc_buf_t *buf; 1858 1859 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA); 1860 1861 atomic_add_64(&arc_loaned_bytes, size); 1862 return (buf); 1863 } 1864 1865 /* 1866 * Return a loaned arc buffer to the arc. 
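 *
 * A minimal sketch of the loan cycle (the spa, size, and tag shown
 * here are illustrative only):
 *
 *	abuf = arc_loan_buf(spa, size);
 *	(fill abuf->b_data with the data to be written)
 *	arc_return_buf(abuf, FTAG);
 *
 * arc_loaned_bytes is increased by arc_loan_buf() and
 * arc_loan_inuse_buf(), and decreased again by arc_return_buf().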
1867 */ 1868 void 1869 arc_return_buf(arc_buf_t *buf, void *tag) 1870 { 1871 arc_buf_hdr_t *hdr = buf->b_hdr; 1872 1873 ASSERT(buf->b_data != NULL); 1874 ASSERT(HDR_HAS_L1HDR(hdr)); 1875 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag); 1876 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 1877 1878 atomic_add_64(&arc_loaned_bytes, -hdr->b_size); 1879 } 1880 1881 /* Detach an arc_buf from a dbuf (tag) */ 1882 void 1883 arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 1884 { 1885 arc_buf_hdr_t *hdr = buf->b_hdr; 1886 1887 ASSERT(buf->b_data != NULL); 1888 ASSERT(HDR_HAS_L1HDR(hdr)); 1889 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 1890 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag); 1891 buf->b_efunc = NULL; 1892 buf->b_private = NULL; 1893 1894 atomic_add_64(&arc_loaned_bytes, hdr->b_size); 1895 } 1896 1897 static arc_buf_t * 1898 arc_buf_clone(arc_buf_t *from) 1899 { 1900 arc_buf_t *buf; 1901 arc_buf_hdr_t *hdr = from->b_hdr; 1902 uint64_t size = hdr->b_size; 1903 1904 ASSERT(HDR_HAS_L1HDR(hdr)); 1905 ASSERT(hdr->b_l1hdr.b_state != arc_anon); 1906 1907 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1908 buf->b_hdr = hdr; 1909 buf->b_data = NULL; 1910 buf->b_efunc = NULL; 1911 buf->b_private = NULL; 1912 buf->b_next = hdr->b_l1hdr.b_buf; 1913 hdr->b_l1hdr.b_buf = buf; 1914 arc_get_data_buf(buf); 1915 bcopy(from->b_data, buf->b_data, size); 1916 1917 /* 1918 * This buffer already exists in the arc so create a duplicate 1919 * copy for the caller. If the buffer is associated with user data 1920 * then track the size and number of duplicates. These stats will be 1921 * updated as duplicate buffers are created and destroyed. 1922 */ 1923 if (HDR_ISTYPE_DATA(hdr)) { 1924 ARCSTAT_BUMP(arcstat_duplicate_buffers); 1925 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size); 1926 } 1927 hdr->b_l1hdr.b_datacnt += 1; 1928 return (buf); 1929 } 1930 1931 void 1932 arc_buf_add_ref(arc_buf_t *buf, void* tag) 1933 { 1934 arc_buf_hdr_t *hdr; 1935 kmutex_t *hash_lock; 1936 1937 /* 1938 * Check to see if this buffer is evicted. Callers 1939 * must verify b_data != NULL to know if the add_ref 1940 * was successful. 1941 */ 1942 mutex_enter(&buf->b_evict_lock); 1943 if (buf->b_data == NULL) { 1944 mutex_exit(&buf->b_evict_lock); 1945 return; 1946 } 1947 hash_lock = HDR_LOCK(buf->b_hdr); 1948 mutex_enter(hash_lock); 1949 hdr = buf->b_hdr; 1950 ASSERT(HDR_HAS_L1HDR(hdr)); 1951 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1952 mutex_exit(&buf->b_evict_lock); 1953 1954 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 1955 hdr->b_l1hdr.b_state == arc_mfu); 1956 1957 add_reference(hdr, hash_lock, tag); 1958 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1959 arc_access(hdr, hash_lock); 1960 mutex_exit(hash_lock); 1961 ARCSTAT_BUMP(arcstat_hits); 1962 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 1963 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 1964 data, metadata, hits); 1965 } 1966 1967 static void 1968 arc_buf_free_on_write(void *data, size_t size, 1969 void (*free_func)(void *, size_t)) 1970 { 1971 l2arc_data_free_t *df; 1972 1973 df = kmem_alloc(sizeof (*df), KM_SLEEP); 1974 df->l2df_data = data; 1975 df->l2df_size = size; 1976 df->l2df_func = free_func; 1977 mutex_enter(&l2arc_free_on_write_mtx); 1978 list_insert_head(l2arc_free_on_write, df); 1979 mutex_exit(&l2arc_free_on_write_mtx); 1980 } 1981 1982 /* 1983 * Free the arc data buffer. If it is an l2arc write in progress, 1984 * the buffer is placed on l2arc_free_on_write to be freed later. 
1985 */ 1986 static void 1987 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t)) 1988 { 1989 arc_buf_hdr_t *hdr = buf->b_hdr; 1990 1991 if (HDR_L2_WRITING(hdr)) { 1992 arc_buf_free_on_write(buf->b_data, hdr->b_size, free_func); 1993 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1994 } else { 1995 free_func(buf->b_data, hdr->b_size); 1996 } 1997 } 1998 1999 static void 2000 arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr) 2001 { 2002 ASSERT(HDR_HAS_L2HDR(hdr)); 2003 ASSERT(MUTEX_HELD(&hdr->b_l2hdr.b_dev->l2ad_mtx)); 2004 2005 /* 2006 * The b_tmp_cdata field is linked off of the b_l1hdr, so if 2007 * that doesn't exist, the header is in the arc_l2c_only state, 2008 * and there isn't anything to free (it's already been freed). 2009 */ 2010 if (!HDR_HAS_L1HDR(hdr)) 2011 return; 2012 2013 /* 2014 * The header isn't being written to the l2arc device, thus it 2015 * shouldn't have a b_tmp_cdata to free. 2016 */ 2017 if (!HDR_L2_WRITING(hdr)) { 2018 ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL); 2019 return; 2020 } 2021 2022 /* 2023 * The header does not have compression enabled. This can be due 2024 * to the buffer not being compressible, or because we're 2025 * freeing the buffer before the second phase of 2026 * l2arc_write_buffers() has started (which does the compression 2027 * step). In either case, b_tmp_cdata does not point to a 2028 * separately compressed buffer, so there's nothing to free (it 2029 * points to the same buffer as the arc_buf_t's b_data field). 2030 */ 2031 if (hdr->b_l2hdr.b_compress == ZIO_COMPRESS_OFF) { 2032 hdr->b_l1hdr.b_tmp_cdata = NULL; 2033 return; 2034 } 2035 2036 /* 2037 * There's nothing to free since the buffer was all zeros and 2038 * compressed to a zero length buffer. 2039 */ 2040 if (hdr->b_l2hdr.b_compress == ZIO_COMPRESS_EMPTY) { 2041 ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL); 2042 return; 2043 } 2044 2045 ASSERT(L2ARC_IS_VALID_COMPRESS(hdr->b_l2hdr.b_compress)); 2046 2047 arc_buf_free_on_write(hdr->b_l1hdr.b_tmp_cdata, 2048 hdr->b_size, zio_data_buf_free); 2049 2050 ARCSTAT_BUMP(arcstat_l2_cdata_free_on_write); 2051 hdr->b_l1hdr.b_tmp_cdata = NULL; 2052 } 2053 2054 /* 2055 * Free up buf->b_data and if 'remove' is set, then pull the 2056 * arc_buf_t off of the arc_buf_hdr_t's list and free it.
2057 */ 2058 static void 2059 arc_buf_destroy(arc_buf_t *buf, boolean_t remove) 2060 { 2061 arc_buf_t **bufp; 2062 2063 /* free up data associated with the buf */ 2064 if (buf->b_data != NULL) { 2065 arc_state_t *state = buf->b_hdr->b_l1hdr.b_state; 2066 uint64_t size = buf->b_hdr->b_size; 2067 arc_buf_contents_t type = arc_buf_type(buf->b_hdr); 2068 2069 arc_cksum_verify(buf); 2070 arc_buf_unwatch(buf); 2071 2072 if (type == ARC_BUFC_METADATA) { 2073 arc_buf_data_free(buf, zio_buf_free); 2074 arc_space_return(size, ARC_SPACE_META); 2075 } else { 2076 ASSERT(type == ARC_BUFC_DATA); 2077 arc_buf_data_free(buf, zio_data_buf_free); 2078 arc_space_return(size, ARC_SPACE_DATA); 2079 } 2080 2081 /* protected by hash lock, if in the hash table */ 2082 if (multilist_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) { 2083 uint64_t *cnt = &state->arcs_lsize[type]; 2084 2085 ASSERT(refcount_is_zero( 2086 &buf->b_hdr->b_l1hdr.b_refcnt)); 2087 ASSERT(state != arc_anon && state != arc_l2c_only); 2088 2089 ASSERT3U(*cnt, >=, size); 2090 atomic_add_64(cnt, -size); 2091 } 2092 2093 (void) refcount_remove_many(&state->arcs_size, size, buf); 2094 buf->b_data = NULL; 2095 2096 /* 2097 * If we're destroying a duplicate buffer make sure 2098 * that the appropriate statistics are updated. 2099 */ 2100 if (buf->b_hdr->b_l1hdr.b_datacnt > 1 && 2101 HDR_ISTYPE_DATA(buf->b_hdr)) { 2102 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 2103 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size); 2104 } 2105 ASSERT(buf->b_hdr->b_l1hdr.b_datacnt > 0); 2106 buf->b_hdr->b_l1hdr.b_datacnt -= 1; 2107 } 2108 2109 /* only remove the buf if requested */ 2110 if (!remove) 2111 return; 2112 2113 /* remove the buf from the hdr list */ 2114 for (bufp = &buf->b_hdr->b_l1hdr.b_buf; *bufp != buf; 2115 bufp = &(*bufp)->b_next) 2116 continue; 2117 *bufp = buf->b_next; 2118 buf->b_next = NULL; 2119 2120 ASSERT(buf->b_efunc == NULL); 2121 2122 /* clean up the buf */ 2123 buf->b_hdr = NULL; 2124 kmem_cache_free(buf_cache, buf); 2125 } 2126 2127 static void 2128 arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr) 2129 { 2130 l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr; 2131 l2arc_dev_t *dev = l2hdr->b_dev; 2132 2133 ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); 2134 ASSERT(HDR_HAS_L2HDR(hdr)); 2135 2136 list_remove(&dev->l2ad_buflist, hdr); 2137 2138 /* 2139 * We don't want to leak the b_tmp_cdata buffer that was 2140 * allocated in l2arc_write_buffers() 2141 */ 2142 arc_buf_l2_cdata_free(hdr); 2143 2144 /* 2145 * If the l2hdr's b_daddr is equal to L2ARC_ADDR_UNSET, then 2146 * this header is being processed by l2arc_write_buffers() (i.e. 2147 * it's in the first stage of l2arc_write_buffers()). 2148 * Re-affirming that truth here, just to serve as a reminder. If 2149 * b_daddr does not equal L2ARC_ADDR_UNSET, then the header may or 2150 * may not have its HDR_L2_WRITING flag set. (the write may have 2151 * completed, in which case HDR_L2_WRITING will be false and the 2152 * b_daddr field will point to the address of the buffer on disk). 2153 */ 2154 IMPLY(l2hdr->b_daddr == L2ARC_ADDR_UNSET, HDR_L2_WRITING(hdr)); 2155 2156 /* 2157 * If b_daddr is equal to L2ARC_ADDR_UNSET, we're racing with 2158 * l2arc_write_buffers(). Since we've just removed this header 2159 * from the l2arc buffer list, this header will never reach the 2160 * second stage of l2arc_write_buffers(), which increments the 2161 * accounting stats for this header. Thus, we must be careful 2162 * not to decrement them for this header either. 
2163 */ 2164 if (l2hdr->b_daddr != L2ARC_ADDR_UNSET) { 2165 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); 2166 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 2167 2168 vdev_space_update(dev->l2ad_vdev, 2169 -l2hdr->b_asize, 0, 0); 2170 2171 (void) refcount_remove_many(&dev->l2ad_alloc, 2172 l2hdr->b_asize, hdr); 2173 } 2174 2175 hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR; 2176 } 2177 2178 static void 2179 arc_hdr_destroy(arc_buf_hdr_t *hdr) 2180 { 2181 if (HDR_HAS_L1HDR(hdr)) { 2182 ASSERT(hdr->b_l1hdr.b_buf == NULL || 2183 hdr->b_l1hdr.b_datacnt > 0); 2184 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2185 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 2186 } 2187 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2188 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 2189 2190 if (HDR_HAS_L2HDR(hdr)) { 2191 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 2192 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx); 2193 2194 if (!buflist_held) 2195 mutex_enter(&dev->l2ad_mtx); 2196 2197 /* 2198 * Even though we checked this conditional above, we 2199 * need to check this again now that we have the 2200 * l2ad_mtx. This is because we could be racing with 2201 * another thread calling l2arc_evict() which might have 2202 * destroyed this header's L2 portion as we were waiting 2203 * to acquire the l2ad_mtx. If that happens, we don't 2204 * want to re-destroy the header's L2 portion. 2205 */ 2206 if (HDR_HAS_L2HDR(hdr)) 2207 arc_hdr_l2hdr_destroy(hdr); 2208 2209 if (!buflist_held) 2210 mutex_exit(&dev->l2ad_mtx); 2211 } 2212 2213 if (!BUF_EMPTY(hdr)) 2214 buf_discard_identity(hdr); 2215 2216 if (hdr->b_freeze_cksum != NULL) { 2217 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 2218 hdr->b_freeze_cksum = NULL; 2219 } 2220 2221 if (HDR_HAS_L1HDR(hdr)) { 2222 while (hdr->b_l1hdr.b_buf) { 2223 arc_buf_t *buf = hdr->b_l1hdr.b_buf; 2224 2225 if (buf->b_efunc != NULL) { 2226 mutex_enter(&arc_user_evicts_lock); 2227 mutex_enter(&buf->b_evict_lock); 2228 ASSERT(buf->b_hdr != NULL); 2229 arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE); 2230 hdr->b_l1hdr.b_buf = buf->b_next; 2231 buf->b_hdr = &arc_eviction_hdr; 2232 buf->b_next = arc_eviction_list; 2233 arc_eviction_list = buf; 2234 mutex_exit(&buf->b_evict_lock); 2235 cv_signal(&arc_user_evicts_cv); 2236 mutex_exit(&arc_user_evicts_lock); 2237 } else { 2238 arc_buf_destroy(hdr->b_l1hdr.b_buf, TRUE); 2239 } 2240 } 2241 #ifdef ZFS_DEBUG 2242 if (hdr->b_l1hdr.b_thawed != NULL) { 2243 kmem_free(hdr->b_l1hdr.b_thawed, 1); 2244 hdr->b_l1hdr.b_thawed = NULL; 2245 } 2246 #endif 2247 } 2248 2249 ASSERT3P(hdr->b_hash_next, ==, NULL); 2250 if (HDR_HAS_L1HDR(hdr)) { 2251 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 2252 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 2253 kmem_cache_free(hdr_full_cache, hdr); 2254 } else { 2255 kmem_cache_free(hdr_l2only_cache, hdr); 2256 } 2257 } 2258 2259 void 2260 arc_buf_free(arc_buf_t *buf, void *tag) 2261 { 2262 arc_buf_hdr_t *hdr = buf->b_hdr; 2263 int hashed = hdr->b_l1hdr.b_state != arc_anon; 2264 2265 ASSERT(buf->b_efunc == NULL); 2266 ASSERT(buf->b_data != NULL); 2267 2268 if (hashed) { 2269 kmutex_t *hash_lock = HDR_LOCK(hdr); 2270 2271 mutex_enter(hash_lock); 2272 hdr = buf->b_hdr; 2273 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 2274 2275 (void) remove_reference(hdr, hash_lock, tag); 2276 if (hdr->b_l1hdr.b_datacnt > 1) { 2277 arc_buf_destroy(buf, TRUE); 2278 } else { 2279 ASSERT(buf == hdr->b_l1hdr.b_buf); 2280 ASSERT(buf->b_efunc == NULL); 2281 hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 2282 } 2283 mutex_exit(hash_lock); 2284 } else if (HDR_IO_IN_PROGRESS(hdr)) { 
2285 int destroy_hdr; 2286 /* 2287 * We are in the middle of an async write. Don't destroy 2288 * this buffer unless the write completes before we finish 2289 * decrementing the reference count. 2290 */ 2291 mutex_enter(&arc_user_evicts_lock); 2292 (void) remove_reference(hdr, NULL, tag); 2293 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2294 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 2295 mutex_exit(&arc_user_evicts_lock); 2296 if (destroy_hdr) 2297 arc_hdr_destroy(hdr); 2298 } else { 2299 if (remove_reference(hdr, NULL, tag) > 0) 2300 arc_buf_destroy(buf, TRUE); 2301 else 2302 arc_hdr_destroy(hdr); 2303 } 2304 } 2305 2306 boolean_t 2307 arc_buf_remove_ref(arc_buf_t *buf, void* tag) 2308 { 2309 arc_buf_hdr_t *hdr = buf->b_hdr; 2310 kmutex_t *hash_lock = HDR_LOCK(hdr); 2311 boolean_t no_callback = (buf->b_efunc == NULL); 2312 2313 if (hdr->b_l1hdr.b_state == arc_anon) { 2314 ASSERT(hdr->b_l1hdr.b_datacnt == 1); 2315 arc_buf_free(buf, tag); 2316 return (no_callback); 2317 } 2318 2319 mutex_enter(hash_lock); 2320 hdr = buf->b_hdr; 2321 ASSERT(hdr->b_l1hdr.b_datacnt > 0); 2322 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 2323 ASSERT(hdr->b_l1hdr.b_state != arc_anon); 2324 ASSERT(buf->b_data != NULL); 2325 2326 (void) remove_reference(hdr, hash_lock, tag); 2327 if (hdr->b_l1hdr.b_datacnt > 1) { 2328 if (no_callback) 2329 arc_buf_destroy(buf, TRUE); 2330 } else if (no_callback) { 2331 ASSERT(hdr->b_l1hdr.b_buf == buf && buf->b_next == NULL); 2332 ASSERT(buf->b_efunc == NULL); 2333 hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 2334 } 2335 ASSERT(no_callback || hdr->b_l1hdr.b_datacnt > 1 || 2336 refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2337 mutex_exit(hash_lock); 2338 return (no_callback); 2339 } 2340 2341 int32_t 2342 arc_buf_size(arc_buf_t *buf) 2343 { 2344 return (buf->b_hdr->b_size); 2345 } 2346 2347 /* 2348 * Called from the DMU to determine if the current buffer should be 2349 * evicted. In order to ensure proper locking, the eviction must be initiated 2350 * from the DMU. Return true if the buffer is associated with user data and 2351 * duplicate buffers still exist. 2352 */ 2353 boolean_t 2354 arc_buf_eviction_needed(arc_buf_t *buf) 2355 { 2356 arc_buf_hdr_t *hdr; 2357 boolean_t evict_needed = B_FALSE; 2358 2359 if (zfs_disable_dup_eviction) 2360 return (B_FALSE); 2361 2362 mutex_enter(&buf->b_evict_lock); 2363 hdr = buf->b_hdr; 2364 if (hdr == NULL) { 2365 /* 2366 * We are in arc_do_user_evicts(); let that function 2367 * perform the eviction. 2368 */ 2369 ASSERT(buf->b_data == NULL); 2370 mutex_exit(&buf->b_evict_lock); 2371 return (B_FALSE); 2372 } else if (buf->b_data == NULL) { 2373 /* 2374 * We have already been added to the arc eviction list; 2375 * recommend eviction. 2376 */ 2377 ASSERT3P(hdr, ==, &arc_eviction_hdr); 2378 mutex_exit(&buf->b_evict_lock); 2379 return (B_TRUE); 2380 } 2381 2382 if (hdr->b_l1hdr.b_datacnt > 1 && HDR_ISTYPE_DATA(hdr)) 2383 evict_needed = B_TRUE; 2384 2385 mutex_exit(&buf->b_evict_lock); 2386 return (evict_needed); 2387 } 2388 2389 /* 2390 * Evict the arc_buf_hdr that is provided as a parameter. The resultant 2391 * state of the header is dependent on it's state prior to entering this 2392 * function. 
The following transitions are possible: 2393 * 2394 * - arc_mru -> arc_mru_ghost 2395 * - arc_mfu -> arc_mfu_ghost 2396 * - arc_mru_ghost -> arc_l2c_only 2397 * - arc_mru_ghost -> deleted 2398 * - arc_mfu_ghost -> arc_l2c_only 2399 * - arc_mfu_ghost -> deleted 2400 */ 2401 static int64_t 2402 arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 2403 { 2404 arc_state_t *evicted_state, *state; 2405 int64_t bytes_evicted = 0; 2406 2407 ASSERT(MUTEX_HELD(hash_lock)); 2408 ASSERT(HDR_HAS_L1HDR(hdr)); 2409 2410 state = hdr->b_l1hdr.b_state; 2411 if (GHOST_STATE(state)) { 2412 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2413 ASSERT(hdr->b_l1hdr.b_buf == NULL); 2414 2415 /* 2416 * l2arc_write_buffers() relies on a header's L1 portion 2417 * (i.e. its b_tmp_cdata field) during its write phase. 2418 * Thus, we cannot push a header onto the arc_l2c_only 2419 * state (removing its L1 piece) until the header is 2420 * done being written to the l2arc. 2421 */ 2422 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) { 2423 ARCSTAT_BUMP(arcstat_evict_l2_skip); 2424 return (bytes_evicted); 2425 } 2426 2427 ARCSTAT_BUMP(arcstat_deleted); 2428 bytes_evicted += hdr->b_size; 2429 2430 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr); 2431 2432 if (HDR_HAS_L2HDR(hdr)) { 2433 /* 2434 * This buffer is cached on the 2nd Level ARC; 2435 * don't destroy the header. 2436 */ 2437 arc_change_state(arc_l2c_only, hdr, hash_lock); 2438 /* 2439 * dropping from L1+L2 cached to L2-only, 2440 * realloc to remove the L1 header. 2441 */ 2442 hdr = arc_hdr_realloc(hdr, hdr_full_cache, 2443 hdr_l2only_cache); 2444 } else { 2445 arc_change_state(arc_anon, hdr, hash_lock); 2446 arc_hdr_destroy(hdr); 2447 } 2448 return (bytes_evicted); 2449 } 2450 2451 ASSERT(state == arc_mru || state == arc_mfu); 2452 evicted_state = (state == arc_mru) ?
arc_mru_ghost : arc_mfu_ghost; 2453 2454 /* prefetch buffers have a minimum lifespan */ 2455 if (HDR_IO_IN_PROGRESS(hdr) || 2456 ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) && 2457 ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < 2458 arc_min_prefetch_lifespan)) { 2459 ARCSTAT_BUMP(arcstat_evict_skip); 2460 return (bytes_evicted); 2461 } 2462 2463 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 2464 ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0); 2465 while (hdr->b_l1hdr.b_buf) { 2466 arc_buf_t *buf = hdr->b_l1hdr.b_buf; 2467 if (!mutex_tryenter(&buf->b_evict_lock)) { 2468 ARCSTAT_BUMP(arcstat_mutex_miss); 2469 break; 2470 } 2471 if (buf->b_data != NULL) 2472 bytes_evicted += hdr->b_size; 2473 if (buf->b_efunc != NULL) { 2474 mutex_enter(&arc_user_evicts_lock); 2475 arc_buf_destroy(buf, FALSE); 2476 hdr->b_l1hdr.b_buf = buf->b_next; 2477 buf->b_hdr = &arc_eviction_hdr; 2478 buf->b_next = arc_eviction_list; 2479 arc_eviction_list = buf; 2480 cv_signal(&arc_user_evicts_cv); 2481 mutex_exit(&arc_user_evicts_lock); 2482 mutex_exit(&buf->b_evict_lock); 2483 } else { 2484 mutex_exit(&buf->b_evict_lock); 2485 arc_buf_destroy(buf, TRUE); 2486 } 2487 } 2488 2489 if (HDR_HAS_L2HDR(hdr)) { 2490 ARCSTAT_INCR(arcstat_evict_l2_cached, hdr->b_size); 2491 } else { 2492 if (l2arc_write_eligible(hdr->b_spa, hdr)) 2493 ARCSTAT_INCR(arcstat_evict_l2_eligible, hdr->b_size); 2494 else 2495 ARCSTAT_INCR(arcstat_evict_l2_ineligible, hdr->b_size); 2496 } 2497 2498 if (hdr->b_l1hdr.b_datacnt == 0) { 2499 arc_change_state(evicted_state, hdr, hash_lock); 2500 ASSERT(HDR_IN_HASH_TABLE(hdr)); 2501 hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE; 2502 hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; 2503 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr); 2504 } 2505 2506 return (bytes_evicted); 2507 } 2508 2509 static uint64_t 2510 arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, 2511 uint64_t spa, int64_t bytes) 2512 { 2513 multilist_sublist_t *mls; 2514 uint64_t bytes_evicted = 0; 2515 arc_buf_hdr_t *hdr; 2516 kmutex_t *hash_lock; 2517 int evict_count = 0; 2518 2519 ASSERT3P(marker, !=, NULL); 2520 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 2521 2522 mls = multilist_sublist_lock(ml, idx); 2523 2524 for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL; 2525 hdr = multilist_sublist_prev(mls, marker)) { 2526 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) || 2527 (evict_count >= zfs_arc_evict_batch_limit)) 2528 break; 2529 2530 /* 2531 * To keep our iteration location, move the marker 2532 * forward. Since we're not holding hdr's hash lock, we 2533 * must be very careful and not remove 'hdr' from the 2534 * sublist. Otherwise, other consumers might mistake the 2535 * 'hdr' as not being on a sublist when they call the 2536 * multilist_link_active() function (they all rely on 2537 * the hash lock protecting concurrent insertions and 2538 * removals). multilist_sublist_move_forward() was 2539 * specifically implemented to ensure this is the case 2540 * (only 'marker' will be removed and re-inserted). 2541 */ 2542 multilist_sublist_move_forward(mls, marker); 2543 2544 /* 2545 * The only case where the b_spa field should ever be 2546 * zero, is the marker headers inserted by 2547 * arc_evict_state(). It's possible for multiple threads 2548 * to be calling arc_evict_state() concurrently (e.g. 2549 * dsl_pool_close() and zio_inject_fault()), so we must 2550 * skip any markers we see from these other threads. 
2551 */ 2552 if (hdr->b_spa == 0) 2553 continue; 2554 2555 /* we're only interested in evicting buffers of a certain spa */ 2556 if (spa != 0 && hdr->b_spa != spa) { 2557 ARCSTAT_BUMP(arcstat_evict_skip); 2558 continue; 2559 } 2560 2561 hash_lock = HDR_LOCK(hdr); 2562 2563 /* 2564 * We aren't calling this function from any code path 2565 * that would already be holding a hash lock, so we're 2566 * asserting on this assumption to be defensive in case 2567 * this ever changes. Without this check, it would be 2568 * possible to incorrectly increment arcstat_mutex_miss 2569 * below (e.g. if the code changed such that we called 2570 * this function with a hash lock held). 2571 */ 2572 ASSERT(!MUTEX_HELD(hash_lock)); 2573 2574 if (mutex_tryenter(hash_lock)) { 2575 uint64_t evicted = arc_evict_hdr(hdr, hash_lock); 2576 mutex_exit(hash_lock); 2577 2578 bytes_evicted += evicted; 2579 2580 /* 2581 * If evicted is zero, arc_evict_hdr() must have 2582 * decided to skip this header, don't increment 2583 * evict_count in this case. 2584 */ 2585 if (evicted != 0) 2586 evict_count++; 2587 2588 /* 2589 * If arc_size isn't overflowing, signal any 2590 * threads that might happen to be waiting. 2591 * 2592 * For each header evicted, we wake up a single 2593 * thread. If we used cv_broadcast, we could 2594 * wake up "too many" threads causing arc_size 2595 * to significantly overflow arc_c; since 2596 * arc_get_data_buf() doesn't check for overflow 2597 * when it's woken up (it doesn't because it's 2598 * possible for the ARC to be overflowing while 2599 * full of un-evictable buffers, and the 2600 * function should proceed in this case). 2601 * 2602 * If threads are left sleeping, due to not 2603 * using cv_broadcast, they will be woken up 2604 * just before arc_reclaim_thread() sleeps. 2605 */ 2606 mutex_enter(&arc_reclaim_lock); 2607 if (!arc_is_overflowing()) 2608 cv_signal(&arc_reclaim_waiters_cv); 2609 mutex_exit(&arc_reclaim_lock); 2610 } else { 2611 ARCSTAT_BUMP(arcstat_mutex_miss); 2612 } 2613 } 2614 2615 multilist_sublist_unlock(mls); 2616 2617 return (bytes_evicted); 2618 } 2619 2620 /* 2621 * Evict buffers from the given arc state, until we've removed the 2622 * specified number of bytes. Move the removed buffers to the 2623 * appropriate evict state. 2624 * 2625 * This function makes a "best effort". It skips over any buffers 2626 * it can't get a hash_lock on, and so, may not catch all candidates. 2627 * It may also return without evicting as much space as requested. 2628 * 2629 * If bytes is specified using the special value ARC_EVICT_ALL, this 2630 * will evict all available (i.e. unlocked and evictable) buffers from 2631 * the given arc state; which is used by arc_flush(). 2632 */ 2633 static uint64_t 2634 arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes, 2635 arc_buf_contents_t type) 2636 { 2637 uint64_t total_evicted = 0; 2638 multilist_t *ml = &state->arcs_list[type]; 2639 int num_sublists; 2640 arc_buf_hdr_t **markers; 2641 2642 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 2643 2644 num_sublists = multilist_get_num_sublists(ml); 2645 2646 /* 2647 * If we've tried to evict from each sublist, made some 2648 * progress, but still have not hit the target number of bytes 2649 * to evict, we want to keep trying. The markers allow us to 2650 * pick up where we left off for each individual sublist, rather 2651 * than starting from the tail each time. 
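 *
 * As a sketch of the mechanism implemented below: a marker header
 * (recognizable by b_spa == 0) is inserted at the tail of every
 * sublist, arc_evict_state_impl() walks backwards from that marker
 * while multilist_sublist_move_forward() keeps the marker at the
 * current iteration point, and the markers are removed and freed once
 * the target has been met or no further progress can be made.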
2652 */ 2653 markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP); 2654 for (int i = 0; i < num_sublists; i++) { 2655 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP); 2656 2657 /* 2658 * A b_spa of 0 is used to indicate that this header is 2659 * a marker. This fact is used in arc_adjust_type() and 2660 * arc_evict_state_impl(). 2661 */ 2662 markers[i]->b_spa = 0; 2663 2664 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 2665 multilist_sublist_insert_tail(mls, markers[i]); 2666 multilist_sublist_unlock(mls); 2667 } 2668 2669 /* 2670 * While we haven't hit our target number of bytes to evict, or 2671 * we're evicting all available buffers. 2672 */ 2673 while (total_evicted < bytes || bytes == ARC_EVICT_ALL) { 2674 /* 2675 * Start eviction using a randomly selected sublist, 2676 * this is to try and evenly balance eviction across all 2677 * sublists. Always starting at the same sublist 2678 * (e.g. index 0) would cause evictions to favor certain 2679 * sublists over others. 2680 */ 2681 int sublist_idx = multilist_get_random_index(ml); 2682 uint64_t scan_evicted = 0; 2683 2684 for (int i = 0; i < num_sublists; i++) { 2685 uint64_t bytes_remaining; 2686 uint64_t bytes_evicted; 2687 2688 if (bytes == ARC_EVICT_ALL) 2689 bytes_remaining = ARC_EVICT_ALL; 2690 else if (total_evicted < bytes) 2691 bytes_remaining = bytes - total_evicted; 2692 else 2693 break; 2694 2695 bytes_evicted = arc_evict_state_impl(ml, sublist_idx, 2696 markers[sublist_idx], spa, bytes_remaining); 2697 2698 scan_evicted += bytes_evicted; 2699 total_evicted += bytes_evicted; 2700 2701 /* we've reached the end, wrap to the beginning */ 2702 if (++sublist_idx >= num_sublists) 2703 sublist_idx = 0; 2704 } 2705 2706 /* 2707 * If we didn't evict anything during this scan, we have 2708 * no reason to believe we'll evict more during another 2709 * scan, so break the loop. 2710 */ 2711 if (scan_evicted == 0) { 2712 /* This isn't possible, let's make that obvious */ 2713 ASSERT3S(bytes, !=, 0); 2714 2715 /* 2716 * When bytes is ARC_EVICT_ALL, the only way to 2717 * break the loop is when scan_evicted is zero. 2718 * In that case, we actually have evicted enough, 2719 * so we don't want to increment the kstat. 2720 */ 2721 if (bytes != ARC_EVICT_ALL) { 2722 ASSERT3S(total_evicted, <, bytes); 2723 ARCSTAT_BUMP(arcstat_evict_not_enough); 2724 } 2725 2726 break; 2727 } 2728 } 2729 2730 for (int i = 0; i < num_sublists; i++) { 2731 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 2732 multilist_sublist_remove(mls, markers[i]); 2733 multilist_sublist_unlock(mls); 2734 2735 kmem_cache_free(hdr_full_cache, markers[i]); 2736 } 2737 kmem_free(markers, sizeof (*markers) * num_sublists); 2738 2739 return (total_evicted); 2740 } 2741 2742 /* 2743 * Flush all "evictable" data of the given type from the arc state 2744 * specified. This will not evict any "active" buffers (i.e. referenced). 2745 * 2746 * When 'retry' is set to FALSE, the function will make a single pass 2747 * over the state and evict any buffers that it can. Since it doesn't 2748 * continually retry the eviction, it might end up leaving some buffers 2749 * in the ARC due to lock misses. 2750 * 2751 * When 'retry' is set to TRUE, the function will continually retry the 2752 * eviction until *all* evictable buffers have been removed from the 2753 * state. As a result, if concurrent insertions into the state are 2754 * allowed (e.g. 
if the ARC isn't shutting down), this function might 2755 * wind up in an infinite loop, continually trying to evict buffers. 2756 */ 2757 static uint64_t 2758 arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type, 2759 boolean_t retry) 2760 { 2761 uint64_t evicted = 0; 2762 2763 while (state->arcs_lsize[type] != 0) { 2764 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type); 2765 2766 if (!retry) 2767 break; 2768 } 2769 2770 return (evicted); 2771 } 2772 2773 /* 2774 * Evict the specified number of bytes from the state specified, 2775 * restricting eviction to the spa and type given. This function 2776 * prevents us from trying to evict more from a state's list than 2777 * is "evictable", and to skip evicting altogether when passed a 2778 * negative value for "bytes". In contrast, arc_evict_state() will 2779 * evict everything it can, when passed a negative value for "bytes". 2780 */ 2781 static uint64_t 2782 arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes, 2783 arc_buf_contents_t type) 2784 { 2785 int64_t delta; 2786 2787 if (bytes > 0 && state->arcs_lsize[type] > 0) { 2788 delta = MIN(state->arcs_lsize[type], bytes); 2789 return (arc_evict_state(state, spa, delta, type)); 2790 } 2791 2792 return (0); 2793 } 2794 2795 /* 2796 * Evict metadata buffers from the cache, such that arc_meta_used is 2797 * capped by the arc_meta_limit tunable. 2798 */ 2799 static uint64_t 2800 arc_adjust_meta(void) 2801 { 2802 uint64_t total_evicted = 0; 2803 int64_t target; 2804 2805 /* 2806 * If we're over the meta limit, we want to evict enough 2807 * metadata to get back under the meta limit. We don't want to 2808 * evict so much that we drop the MRU below arc_p, though. If 2809 * we're over the meta limit more than we're over arc_p, we 2810 * evict some from the MRU here, and some from the MFU below. 2811 */ 2812 target = MIN((int64_t)(arc_meta_used - arc_meta_limit), 2813 (int64_t)(refcount_count(&arc_anon->arcs_size) + 2814 refcount_count(&arc_mru->arcs_size) - arc_p)); 2815 2816 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 2817 2818 /* 2819 * Similar to the above, we want to evict enough bytes to get us 2820 * below the meta limit, but not so much as to drop us below the 2821 * space alloted to the MFU (which is defined as arc_c - arc_p). 2822 */ 2823 target = MIN((int64_t)(arc_meta_used - arc_meta_limit), 2824 (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p))); 2825 2826 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 2827 2828 return (total_evicted); 2829 } 2830 2831 /* 2832 * Return the type of the oldest buffer in the given arc state 2833 * 2834 * This function will select a random sublist of type ARC_BUFC_DATA and 2835 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist 2836 * is compared, and the type which contains the "older" buffer will be 2837 * returned. 
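 *
 * For example (illustrative lbolt values): if the tail of the data
 * sublist was last accessed at tick 1000 and the tail of the metadata
 * sublist at tick 1500, the data buffer is the older of the two, so
 * ARC_BUFC_DATA is returned and data is evicted first.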
2838 */ 2839 static arc_buf_contents_t 2840 arc_adjust_type(arc_state_t *state) 2841 { 2842 multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA]; 2843 multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA]; 2844 int data_idx = multilist_get_random_index(data_ml); 2845 int meta_idx = multilist_get_random_index(meta_ml); 2846 multilist_sublist_t *data_mls; 2847 multilist_sublist_t *meta_mls; 2848 arc_buf_contents_t type; 2849 arc_buf_hdr_t *data_hdr; 2850 arc_buf_hdr_t *meta_hdr; 2851 2852 /* 2853 * We keep the sublist lock until we're finished, to prevent 2854 * the headers from being destroyed via arc_evict_state(). 2855 */ 2856 data_mls = multilist_sublist_lock(data_ml, data_idx); 2857 meta_mls = multilist_sublist_lock(meta_ml, meta_idx); 2858 2859 /* 2860 * These two loops are to ensure we skip any markers that 2861 * might be at the tail of the lists due to arc_evict_state(). 2862 */ 2863 2864 for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL; 2865 data_hdr = multilist_sublist_prev(data_mls, data_hdr)) { 2866 if (data_hdr->b_spa != 0) 2867 break; 2868 } 2869 2870 for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL; 2871 meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) { 2872 if (meta_hdr->b_spa != 0) 2873 break; 2874 } 2875 2876 if (data_hdr == NULL && meta_hdr == NULL) { 2877 type = ARC_BUFC_DATA; 2878 } else if (data_hdr == NULL) { 2879 ASSERT3P(meta_hdr, !=, NULL); 2880 type = ARC_BUFC_METADATA; 2881 } else if (meta_hdr == NULL) { 2882 ASSERT3P(data_hdr, !=, NULL); 2883 type = ARC_BUFC_DATA; 2884 } else { 2885 ASSERT3P(data_hdr, !=, NULL); 2886 ASSERT3P(meta_hdr, !=, NULL); 2887 2888 /* The headers can't be on the sublist without an L1 header */ 2889 ASSERT(HDR_HAS_L1HDR(data_hdr)); 2890 ASSERT(HDR_HAS_L1HDR(meta_hdr)); 2891 2892 if (data_hdr->b_l1hdr.b_arc_access < 2893 meta_hdr->b_l1hdr.b_arc_access) { 2894 type = ARC_BUFC_DATA; 2895 } else { 2896 type = ARC_BUFC_METADATA; 2897 } 2898 } 2899 2900 multilist_sublist_unlock(meta_mls); 2901 multilist_sublist_unlock(data_mls); 2902 2903 return (type); 2904 } 2905 2906 /* 2907 * Evict buffers from the cache, such that arc_size is capped by arc_c. 2908 */ 2909 static uint64_t 2910 arc_adjust(void) 2911 { 2912 uint64_t total_evicted = 0; 2913 uint64_t bytes; 2914 int64_t target; 2915 2916 /* 2917 * If we're over arc_meta_limit, we want to correct that before 2918 * potentially evicting data buffers below. 2919 */ 2920 total_evicted += arc_adjust_meta(); 2921 2922 /* 2923 * Adjust MRU size 2924 * 2925 * If we're over the target cache size, we want to evict enough 2926 * from the list to get back to our target size. We don't want 2927 * to evict too much from the MRU, such that it drops below 2928 * arc_p. So, if we're over our target cache size more than 2929 * the MRU is over arc_p, we'll evict enough to get back to 2930 * arc_p here, and then evict more from the MFU below. 2931 */ 2932 target = MIN((int64_t)(arc_size - arc_c), 2933 (int64_t)(refcount_count(&arc_anon->arcs_size) + 2934 refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p)); 2935 2936 /* 2937 * If we're below arc_meta_min, always prefer to evict data. 2938 * Otherwise, try to satisfy the requested number of bytes to 2939 * evict from the type which contains older buffers; in an 2940 * effort to keep newer buffers in the cache regardless of their 2941 * type. If we cannot satisfy the number of bytes from this 2942 * type, spill over into the next type. 
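 *
 * As a worked example (sizes are illustrative): with arc_size = 10 GB,
 * arc_c = 8 GB, arc_p = 4 GB, and anon + mru + meta_used = 7 GB, the
 * target below is MIN(10 - 8, 7 - 4) = 2 GB, so at most 2 GB are
 * evicted from the MRU here and any remaining overage is left to the
 * MFU pass that follows.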
2943 */ 2944 if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA && 2945 arc_meta_used > arc_meta_min) { 2946 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 2947 total_evicted += bytes; 2948 2949 /* 2950 * If we couldn't evict our target number of bytes from 2951 * metadata, we try to get the rest from data. 2952 */ 2953 target -= bytes; 2954 2955 total_evicted += 2956 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 2957 } else { 2958 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 2959 total_evicted += bytes; 2960 2961 /* 2962 * If we couldn't evict our target number of bytes from 2963 * data, we try to get the rest from metadata. 2964 */ 2965 target -= bytes; 2966 2967 total_evicted += 2968 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 2969 } 2970 2971 /* 2972 * Adjust MFU size 2973 * 2974 * Now that we've tried to evict enough from the MRU to get its 2975 * size back to arc_p, if we're still above the target cache 2976 * size, we evict the rest from the MFU. 2977 */ 2978 target = arc_size - arc_c; 2979 2980 if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA && 2981 arc_meta_used > arc_meta_min) { 2982 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 2983 total_evicted += bytes; 2984 2985 /* 2986 * If we couldn't evict our target number of bytes from 2987 * metadata, we try to get the rest from data. 2988 */ 2989 target -= bytes; 2990 2991 total_evicted += 2992 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 2993 } else { 2994 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 2995 total_evicted += bytes; 2996 2997 /* 2998 * If we couldn't evict our target number of bytes from 2999 * data, we try to get the rest from metadata. 3000 */ 3001 target -= bytes; 3002 3003 total_evicted += 3004 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 3005 } 3006 3007 /* 3008 * Adjust ghost lists 3009 * 3010 * In addition to the above, the ARC also defines target values 3011 * for the ghost lists. The sum of the mru list and mru ghost 3012 * list should never exceed the target size of the cache, and 3013 * the sum of the mru list, mfu list, mru ghost list, and mfu 3014 * ghost list should never exceed twice the target size of the 3015 * cache. The following logic enforces these limits on the ghost 3016 * caches, and evicts from them as needed.
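 *
 * As a worked example (sizes are illustrative): with arc_c = 8 GB,
 * an mru list of 5 GB, and an mru ghost list of 4 GB, the first
 * target below is 5 + 4 - 8 = 1 GB of ghost headers to drop from the
 * mru ghost list; the second pass applies the same arithmetic to the
 * combined mru ghost and mfu ghost lists.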
3017 */ 3018 target = refcount_count(&arc_mru->arcs_size) + 3019 refcount_count(&arc_mru_ghost->arcs_size) - arc_c; 3020 3021 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA); 3022 total_evicted += bytes; 3023 3024 target -= bytes; 3025 3026 total_evicted += 3027 arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA); 3028 3029 /* 3030 * We assume the sum of the mru list and mfu list is less than 3031 * or equal to arc_c (we enforced this above), which means we 3032 * can use the simpler of the two equations below: 3033 * 3034 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c 3035 * mru ghost + mfu ghost <= arc_c 3036 */ 3037 target = refcount_count(&arc_mru_ghost->arcs_size) + 3038 refcount_count(&arc_mfu_ghost->arcs_size) - arc_c; 3039 3040 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA); 3041 total_evicted += bytes; 3042 3043 target -= bytes; 3044 3045 total_evicted += 3046 arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA); 3047 3048 return (total_evicted); 3049 } 3050 3051 static void 3052 arc_do_user_evicts(void) 3053 { 3054 mutex_enter(&arc_user_evicts_lock); 3055 while (arc_eviction_list != NULL) { 3056 arc_buf_t *buf = arc_eviction_list; 3057 arc_eviction_list = buf->b_next; 3058 mutex_enter(&buf->b_evict_lock); 3059 buf->b_hdr = NULL; 3060 mutex_exit(&buf->b_evict_lock); 3061 mutex_exit(&arc_user_evicts_lock); 3062 3063 if (buf->b_efunc != NULL) 3064 VERIFY0(buf->b_efunc(buf->b_private)); 3065 3066 buf->b_efunc = NULL; 3067 buf->b_private = NULL; 3068 kmem_cache_free(buf_cache, buf); 3069 mutex_enter(&arc_user_evicts_lock); 3070 } 3071 mutex_exit(&arc_user_evicts_lock); 3072 } 3073 3074 void 3075 arc_flush(spa_t *spa, boolean_t retry) 3076 { 3077 uint64_t guid = 0; 3078 3079 /* 3080 * If retry is TRUE, a spa must not be specified since we have 3081 * no good way to determine if all of a spa's buffers have been 3082 * evicted from an arc state. 
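 *
 * Put differently: retry == B_TRUE is only legal for a full flush
 * (spa == NULL), which the ASSERT below enforces; a per-pool flush
 * must pass retry == B_FALSE and accept that buffers whose hash locks
 * could not be taken may be left behind.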
3083 */ 3084 ASSERT(!retry || spa == 0); 3085 3086 if (spa != NULL) 3087 guid = spa_load_guid(spa); 3088 3089 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry); 3090 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry); 3091 3092 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry); 3093 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry); 3094 3095 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry); 3096 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry); 3097 3098 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry); 3099 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry); 3100 3101 arc_do_user_evicts(); 3102 ASSERT(spa || arc_eviction_list == NULL); 3103 } 3104 3105 void 3106 arc_shrink(int64_t to_free) 3107 { 3108 if (arc_c > arc_c_min) { 3109 3110 if (arc_c > arc_c_min + to_free) 3111 atomic_add_64(&arc_c, -to_free); 3112 else 3113 arc_c = arc_c_min; 3114 3115 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 3116 if (arc_c > arc_size) 3117 arc_c = MAX(arc_size, arc_c_min); 3118 if (arc_p > arc_c) 3119 arc_p = (arc_c >> 1); 3120 ASSERT(arc_c >= arc_c_min); 3121 ASSERT((int64_t)arc_p >= 0); 3122 } 3123 3124 if (arc_size > arc_c) 3125 (void) arc_adjust(); 3126 } 3127 3128 typedef enum free_memory_reason_t { 3129 FMR_UNKNOWN, 3130 FMR_NEEDFREE, 3131 FMR_LOTSFREE, 3132 FMR_SWAPFS_MINFREE, 3133 FMR_PAGES_PP_MAXIMUM, 3134 FMR_HEAP_ARENA, 3135 FMR_ZIO_ARENA, 3136 } free_memory_reason_t; 3137 3138 int64_t last_free_memory; 3139 free_memory_reason_t last_free_reason; 3140 3141 /* 3142 * Additional reserve of pages for pp_reserve. 3143 */ 3144 int64_t arc_pages_pp_reserve = 64; 3145 3146 /* 3147 * Additional reserve of pages for swapfs. 3148 */ 3149 int64_t arc_swapfs_reserve = 64; 3150 3151 /* 3152 * Return the amount of memory that can be consumed before reclaim will be 3153 * needed. Positive if there is sufficient free memory, negative indicates 3154 * the amount of memory that needs to be freed up. 3155 */ 3156 static int64_t 3157 arc_available_memory(void) 3158 { 3159 int64_t lowest = INT64_MAX; 3160 int64_t n; 3161 free_memory_reason_t r = FMR_UNKNOWN; 3162 3163 #ifdef _KERNEL 3164 if (needfree > 0) { 3165 n = PAGESIZE * (-needfree); 3166 if (n < lowest) { 3167 lowest = n; 3168 r = FMR_NEEDFREE; 3169 } 3170 } 3171 3172 /* 3173 * check that we're out of range of the pageout scanner. It starts to 3174 * schedule paging if freemem is less than lotsfree and needfree. 3175 * lotsfree is the high-water mark for pageout, and needfree is the 3176 * number of needed free pages. We add extra pages here to make sure 3177 * the scanner doesn't start up while we're freeing memory. 3178 */ 3179 n = PAGESIZE * (freemem - lotsfree - needfree - desfree); 3180 if (n < lowest) { 3181 lowest = n; 3182 r = FMR_LOTSFREE; 3183 } 3184 3185 /* 3186 * check to make sure that swapfs has enough space so that anon 3187 * reservations can still succeed. anon_resvmem() checks that the 3188 * availrmem is greater than swapfs_minfree, and the number of reserved 3189 * swap pages. We also add a bit of extra here just to prevent 3190 * circumstances from getting really dire. 3191 */ 3192 n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve - 3193 desfree - arc_swapfs_reserve); 3194 if (n < lowest) { 3195 lowest = n; 3196 r = FMR_SWAPFS_MINFREE; 3197 } 3198 3199 3200 /* 3201 * Check that we have enough availrmem that memory locking (e.g., via 3202 * mlock(3C) or memcntl(2)) can still succeed. 
(pages_pp_maximum 3203 * stores the number of pages that cannot be locked; when availrmem 3204 * drops below pages_pp_maximum, page locking mechanisms such as 3205 * page_pp_lock() will fail.) 3206 */ 3207 n = PAGESIZE * (availrmem - pages_pp_maximum - 3208 arc_pages_pp_reserve); 3209 if (n < lowest) { 3210 lowest = n; 3211 r = FMR_PAGES_PP_MAXIMUM; 3212 } 3213 3214 #if defined(__i386) 3215 /* 3216 * If we're on an i386 platform, it's possible that we'll exhaust the 3217 * kernel heap space before we ever run out of available physical 3218 * memory. Most checks of the size of the heap_area compare against 3219 * tune.t_minarmem, which is the minimum available real memory that we 3220 * can have in the system. However, this is generally fixed at 25 pages 3221 * which is so low that it's useless. In this comparison, we seek to 3222 * calculate the total heap-size, and reclaim if more than 3/4ths of the 3223 * heap is allocated. (Or, in the calculation, if less than 1/4th is 3224 * free) 3225 */ 3226 n = vmem_size(heap_arena, VMEM_FREE) - 3227 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2); 3228 if (n < lowest) { 3229 lowest = n; 3230 r = FMR_HEAP_ARENA; 3231 } 3232 #endif 3233 3234 /* 3235 * If zio data pages are being allocated out of a separate heap segment, 3236 * then enforce that the size of available vmem for this arena remains 3237 * above about 1/16th free. 3238 * 3239 * Note: The 1/16th arena free requirement was put in place 3240 * to aggressively evict memory from the arc in order to avoid 3241 * memory fragmentation issues. 3242 */ 3243 if (zio_arena != NULL) { 3244 n = vmem_size(zio_arena, VMEM_FREE) - 3245 (vmem_size(zio_arena, VMEM_ALLOC) >> 4); 3246 if (n < lowest) { 3247 lowest = n; 3248 r = FMR_ZIO_ARENA; 3249 } 3250 } 3251 #else 3252 /* Every 100 calls, free a small amount */ 3253 if (spa_get_random(100) == 0) 3254 lowest = -1024; 3255 #endif 3256 3257 last_free_memory = lowest; 3258 last_free_reason = r; 3259 3260 return (lowest); 3261 } 3262 3263 3264 /* 3265 * Determine if the system is under memory pressure and is asking 3266 * to reclaim memory. A return value of TRUE indicates that the system 3267 * is under memory pressure and that the arc should adjust accordingly. 3268 */ 3269 static boolean_t 3270 arc_reclaim_needed(void) 3271 { 3272 return (arc_available_memory() < 0); 3273 } 3274 3275 static void 3276 arc_kmem_reap_now(void) 3277 { 3278 size_t i; 3279 kmem_cache_t *prev_cache = NULL; 3280 kmem_cache_t *prev_data_cache = NULL; 3281 extern kmem_cache_t *zio_buf_cache[]; 3282 extern kmem_cache_t *zio_data_buf_cache[]; 3283 extern kmem_cache_t *range_seg_cache; 3284 3285 #ifdef _KERNEL 3286 if (arc_meta_used >= arc_meta_limit) { 3287 /* 3288 * We are exceeding our meta-data cache limit. 3289 * Purge some DNLC entries to release holds on meta-data. 3290 */ 3291 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 3292 } 3293 #if defined(__i386) 3294 /* 3295 * Reclaim unused memory from all kmem caches. 
3296 */ 3297 kmem_reap(); 3298 #endif 3299 #endif 3300 3301 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 3302 if (zio_buf_cache[i] != prev_cache) { 3303 prev_cache = zio_buf_cache[i]; 3304 kmem_cache_reap_now(zio_buf_cache[i]); 3305 } 3306 if (zio_data_buf_cache[i] != prev_data_cache) { 3307 prev_data_cache = zio_data_buf_cache[i]; 3308 kmem_cache_reap_now(zio_data_buf_cache[i]); 3309 } 3310 } 3311 kmem_cache_reap_now(buf_cache); 3312 kmem_cache_reap_now(hdr_full_cache); 3313 kmem_cache_reap_now(hdr_l2only_cache); 3314 kmem_cache_reap_now(range_seg_cache); 3315 3316 if (zio_arena != NULL) { 3317 /* 3318 * Ask the vmem arena to reclaim unused memory from its 3319 * quantum caches. 3320 */ 3321 vmem_qcache_reap(zio_arena); 3322 } 3323 } 3324 3325 /* 3326 * Threads can block in arc_get_data_buf() waiting for this thread to evict 3327 * enough data and signal them to proceed. When this happens, the threads in 3328 * arc_get_data_buf() are sleeping while holding the hash lock for their 3329 * particular arc header. Thus, we must be careful to never sleep on a 3330 * hash lock in this thread. This is to prevent the following deadlock: 3331 * 3332 * - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L", 3333 * waiting for the reclaim thread to signal it. 3334 * 3335 * - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter, 3336 * fails, and goes to sleep forever. 3337 * 3338 * This possible deadlock is avoided by always acquiring a hash lock 3339 * using mutex_tryenter() from arc_reclaim_thread(). 3340 */ 3341 static void 3342 arc_reclaim_thread(void) 3343 { 3344 clock_t growtime = 0; 3345 callb_cpr_t cpr; 3346 3347 CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG); 3348 3349 mutex_enter(&arc_reclaim_lock); 3350 while (!arc_reclaim_thread_exit) { 3351 int64_t free_memory = arc_available_memory(); 3352 uint64_t evicted = 0; 3353 3354 mutex_exit(&arc_reclaim_lock); 3355 3356 if (free_memory < 0) { 3357 3358 arc_no_grow = B_TRUE; 3359 arc_warm = B_TRUE; 3360 3361 /* 3362 * Wait at least zfs_grow_retry (default 60) seconds 3363 * before considering growing. 3364 */ 3365 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 3366 3367 arc_kmem_reap_now(); 3368 3369 /* 3370 * If we are still low on memory, shrink the ARC 3371 * so that we have arc_shrink_min free space. 3372 */ 3373 free_memory = arc_available_memory(); 3374 3375 int64_t to_free = 3376 (arc_c >> arc_shrink_shift) - free_memory; 3377 if (to_free > 0) { 3378 #ifdef _KERNEL 3379 to_free = MAX(to_free, ptob(needfree)); 3380 #endif 3381 arc_shrink(to_free); 3382 } 3383 } else if (free_memory < arc_c >> arc_no_grow_shift) { 3384 arc_no_grow = B_TRUE; 3385 } else if (ddi_get_lbolt() >= growtime) { 3386 arc_no_grow = B_FALSE; 3387 } 3388 3389 evicted = arc_adjust(); 3390 3391 mutex_enter(&arc_reclaim_lock); 3392 3393 /* 3394 * If evicted is zero, we couldn't evict anything via 3395 * arc_adjust(). This could be due to hash lock 3396 * collisions, but more likely due to the majority of 3397 * arc buffers being unevictable. Therefore, even if 3398 * arc_size is above arc_c, another pass is unlikely to 3399 * be helpful and could potentially cause us to enter an 3400 * infinite loop. 3401 */ 3402 if (arc_size <= arc_c || evicted == 0) { 3403 /* 3404 * We're either no longer overflowing, or we 3405 * can't evict anything more, so we should wake 3406 * up any threads before we go to sleep. 
3407 */ 3408 cv_broadcast(&arc_reclaim_waiters_cv); 3409 3410 /* 3411 * Block until signaled, or after one second (we 3412 * might need to perform arc_kmem_reap_now() 3413 * even if we aren't being signalled) 3414 */ 3415 CALLB_CPR_SAFE_BEGIN(&cpr); 3416 (void) cv_timedwait(&arc_reclaim_thread_cv, 3417 &arc_reclaim_lock, ddi_get_lbolt() + hz); 3418 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock); 3419 } 3420 } 3421 3422 arc_reclaim_thread_exit = FALSE; 3423 cv_broadcast(&arc_reclaim_thread_cv); 3424 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_lock */ 3425 thread_exit(); 3426 } 3427 3428 static void 3429 arc_user_evicts_thread(void) 3430 { 3431 callb_cpr_t cpr; 3432 3433 CALLB_CPR_INIT(&cpr, &arc_user_evicts_lock, callb_generic_cpr, FTAG); 3434 3435 mutex_enter(&arc_user_evicts_lock); 3436 while (!arc_user_evicts_thread_exit) { 3437 mutex_exit(&arc_user_evicts_lock); 3438 3439 arc_do_user_evicts(); 3440 3441 /* 3442 * This is necessary in order for the mdb ::arc dcmd to 3443 * show up to date information. Since the ::arc command 3444 * does not call the kstat's update function, without 3445 * this call, the command may show stale stats for the 3446 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even 3447 * with this change, the data might be up to 1 second 3448 * out of date; but that should suffice. The arc_state_t 3449 * structures can be queried directly if more accurate 3450 * information is needed. 3451 */ 3452 if (arc_ksp != NULL) 3453 arc_ksp->ks_update(arc_ksp, KSTAT_READ); 3454 3455 mutex_enter(&arc_user_evicts_lock); 3456 3457 /* 3458 * Block until signaled, or after one second (we need to 3459 * call the arc's kstat update function regularly). 3460 */ 3461 CALLB_CPR_SAFE_BEGIN(&cpr); 3462 (void) cv_timedwait(&arc_user_evicts_cv, 3463 &arc_user_evicts_lock, ddi_get_lbolt() + hz); 3464 CALLB_CPR_SAFE_END(&cpr, &arc_user_evicts_lock); 3465 } 3466 3467 arc_user_evicts_thread_exit = FALSE; 3468 cv_broadcast(&arc_user_evicts_cv); 3469 CALLB_CPR_EXIT(&cpr); /* drops arc_user_evicts_lock */ 3470 thread_exit(); 3471 } 3472 3473 /* 3474 * Adapt arc info given the number of bytes we are trying to add and 3475 * the state that we are comming from. This function is only called 3476 * when we are adding new content to the cache. 3477 */ 3478 static void 3479 arc_adapt(int bytes, arc_state_t *state) 3480 { 3481 int mult; 3482 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 3483 int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size); 3484 int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size); 3485 3486 if (state == arc_l2c_only) 3487 return; 3488 3489 ASSERT(bytes > 0); 3490 /* 3491 * Adapt the target size of the MRU list: 3492 * - if we just hit in the MRU ghost list, then increase 3493 * the target size of the MRU list. 3494 * - if we just hit in the MFU ghost list, then increase 3495 * the target size of the MFU list by decreasing the 3496 * target size of the MRU list. 3497 */ 3498 if (state == arc_mru_ghost) { 3499 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); 3500 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 3501 3502 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 3503 } else if (state == arc_mfu_ghost) { 3504 uint64_t delta; 3505 3506 mult = (mfug_size >= mrug_size) ? 
1 : (mrug_size / mfug_size); 3507 mult = MIN(mult, 10); 3508 3509 delta = MIN(bytes * mult, arc_p); 3510 arc_p = MAX(arc_p_min, arc_p - delta); 3511 } 3512 ASSERT((int64_t)arc_p >= 0); 3513 3514 if (arc_reclaim_needed()) { 3515 cv_signal(&arc_reclaim_thread_cv); 3516 return; 3517 } 3518 3519 if (arc_no_grow) 3520 return; 3521 3522 if (arc_c >= arc_c_max) 3523 return; 3524 3525 /* 3526 * If we're within (2 * maxblocksize) bytes of the target 3527 * cache size, increment the target cache size 3528 */ 3529 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 3530 atomic_add_64(&arc_c, (int64_t)bytes); 3531 if (arc_c > arc_c_max) 3532 arc_c = arc_c_max; 3533 else if (state == arc_anon) 3534 atomic_add_64(&arc_p, (int64_t)bytes); 3535 if (arc_p > arc_c) 3536 arc_p = arc_c; 3537 } 3538 ASSERT((int64_t)arc_p >= 0); 3539 } 3540 3541 /* 3542 * Check if arc_size has grown past our upper threshold, determined by 3543 * zfs_arc_overflow_shift. 3544 */ 3545 static boolean_t 3546 arc_is_overflowing(void) 3547 { 3548 /* Always allow at least one block of overflow */ 3549 uint64_t overflow = MAX(SPA_MAXBLOCKSIZE, 3550 arc_c >> zfs_arc_overflow_shift); 3551 3552 return (arc_size >= arc_c + overflow); 3553 } 3554 3555 /* 3556 * The buffer, supplied as the first argument, needs a data block. If we 3557 * are hitting the hard limit for the cache size, we must sleep, waiting 3558 * for the eviction thread to catch up. If we're past the target size 3559 * but below the hard limit, we'll only signal the reclaim thread and 3560 * continue on. 3561 */ 3562 static void 3563 arc_get_data_buf(arc_buf_t *buf) 3564 { 3565 arc_state_t *state = buf->b_hdr->b_l1hdr.b_state; 3566 uint64_t size = buf->b_hdr->b_size; 3567 arc_buf_contents_t type = arc_buf_type(buf->b_hdr); 3568 3569 arc_adapt(size, state); 3570 3571 /* 3572 * If arc_size is currently overflowing, and has grown past our 3573 * upper limit, we must be adding data faster than the evict 3574 * thread can evict. Thus, to ensure we don't compound the 3575 * problem by adding more data and forcing arc_size to grow even 3576 * further past its target size, we halt and wait for the 3577 * eviction thread to catch up. 3578 * 3579 * It's also possible that the reclaim thread is unable to evict 3580 * enough buffers to get arc_size below the overflow limit (e.g. 3581 * due to buffers being un-evictable, or hash lock collisions). 3582 * In this case, we want to proceed regardless of whether we're 3583 * overflowing; thus we don't use a while loop here. 3584 */ 3585 if (arc_is_overflowing()) { 3586 mutex_enter(&arc_reclaim_lock); 3587 3588 /* 3589 * Now that we've acquired the lock, we may no longer be 3590 * over the overflow limit, so let's check. 3591 * 3592 * We're ignoring the case of spurious wakeups. If that 3593 * were to happen, it'd let this thread consume an ARC 3594 * buffer before it should have (i.e. before we're under 3595 * the overflow limit and were signaled by the reclaim 3596 * thread). As long as that is a rare occurrence, it 3597 * shouldn't cause any harm.
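 *
 * For a sense of scale (the numbers here are purely illustrative,
 * not defaults): with arc_c at 4 GB and zfs_arc_overflow_shift set
 * to 8, the margin computed by arc_is_overflowing() above is
 * MAX(SPA_MAXBLOCKSIZE, 4 GB >> 8) = MAX(SPA_MAXBLOCKSIZE, 16 MB),
 * so a thread only blocks here once arc_size exceeds arc_c by at
 * least that much.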
3598 */ 3599 if (arc_is_overflowing()) { 3600 cv_signal(&arc_reclaim_thread_cv); 3601 cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock); 3602 } 3603 3604 mutex_exit(&arc_reclaim_lock); 3605 } 3606 3607 if (type == ARC_BUFC_METADATA) { 3608 buf->b_data = zio_buf_alloc(size); 3609 arc_space_consume(size, ARC_SPACE_META); 3610 } else { 3611 ASSERT(type == ARC_BUFC_DATA); 3612 buf->b_data = zio_data_buf_alloc(size); 3613 arc_space_consume(size, ARC_SPACE_DATA); 3614 } 3615 3616 /* 3617 * Update the state size. Note that ghost states have a 3618 * "ghost size" and so don't need to be updated. 3619 */ 3620 if (!GHOST_STATE(buf->b_hdr->b_l1hdr.b_state)) { 3621 arc_buf_hdr_t *hdr = buf->b_hdr; 3622 arc_state_t *state = hdr->b_l1hdr.b_state; 3623 3624 (void) refcount_add_many(&state->arcs_size, size, buf); 3625 3626 /* 3627 * If this is reached via arc_read, the link is 3628 * protected by the hash lock. If reached via 3629 * arc_buf_alloc, the header should not be accessed by 3630 * any other thread. And, if reached via arc_read_done, 3631 * the hash lock will protect it if it's found in the 3632 * hash table; otherwise no other thread should be 3633 * trying to [add|remove]_reference it. 3634 */ 3635 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 3636 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3637 atomic_add_64(&hdr->b_l1hdr.b_state->arcs_lsize[type], 3638 size); 3639 } 3640 /* 3641 * If we are growing the cache, and we are adding anonymous 3642 * data, and we have outgrown arc_p, update arc_p 3643 */ 3644 if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon && 3645 (refcount_count(&arc_anon->arcs_size) + 3646 refcount_count(&arc_mru->arcs_size) > arc_p)) 3647 arc_p = MIN(arc_c, arc_p + size); 3648 } 3649 } 3650 3651 /* 3652 * This routine is called whenever a buffer is accessed. 3653 * NOTE: the hash lock is dropped in this function. 3654 */ 3655 static void 3656 arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 3657 { 3658 clock_t now; 3659 3660 ASSERT(MUTEX_HELD(hash_lock)); 3661 ASSERT(HDR_HAS_L1HDR(hdr)); 3662 3663 if (hdr->b_l1hdr.b_state == arc_anon) { 3664 /* 3665 * This buffer is not in the cache, and does not 3666 * appear in our "ghost" list. Add the new buffer 3667 * to the MRU state. 3668 */ 3669 3670 ASSERT0(hdr->b_l1hdr.b_arc_access); 3671 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 3672 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 3673 arc_change_state(arc_mru, hdr, hash_lock); 3674 3675 } else if (hdr->b_l1hdr.b_state == arc_mru) { 3676 now = ddi_get_lbolt(); 3677 3678 /* 3679 * If this buffer is here because of a prefetch, then either: 3680 * - clear the flag if this is a "referencing" read 3681 * (any subsequent access will bump this into the MFU state). 3682 * or 3683 * - move the buffer to the head of the list if this is 3684 * another prefetch (to make it less likely to be evicted). 3685 */ 3686 if (HDR_PREFETCH(hdr)) { 3687 if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 3688 /* link protected by hash lock */ 3689 ASSERT(multilist_link_active( 3690 &hdr->b_l1hdr.b_arc_node)); 3691 } else { 3692 hdr->b_flags &= ~ARC_FLAG_PREFETCH; 3693 ARCSTAT_BUMP(arcstat_mru_hits); 3694 } 3695 hdr->b_l1hdr.b_arc_access = now; 3696 return; 3697 } 3698 3699 /* 3700 * This buffer has been "accessed" only once so far, 3701 * but it is still in the cache. Move it to the MFU 3702 * state. 3703 */ 3704 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) { 3705 /* 3706 * More than 125ms have passed since we 3707 * instantiated this buffer. 
Move it to the 3708 * most frequently used state. 3709 */ 3710 hdr->b_l1hdr.b_arc_access = now; 3711 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 3712 arc_change_state(arc_mfu, hdr, hash_lock); 3713 } 3714 ARCSTAT_BUMP(arcstat_mru_hits); 3715 } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { 3716 arc_state_t *new_state; 3717 /* 3718 * This buffer has been "accessed" recently, but 3719 * was evicted from the cache. Move it to the 3720 * MFU state. 3721 */ 3722 3723 if (HDR_PREFETCH(hdr)) { 3724 new_state = arc_mru; 3725 if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) 3726 hdr->b_flags &= ~ARC_FLAG_PREFETCH; 3727 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 3728 } else { 3729 new_state = arc_mfu; 3730 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 3731 } 3732 3733 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 3734 arc_change_state(new_state, hdr, hash_lock); 3735 3736 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 3737 } else if (hdr->b_l1hdr.b_state == arc_mfu) { 3738 /* 3739 * This buffer has been accessed more than once and is 3740 * still in the cache. Keep it in the MFU state. 3741 * 3742 * NOTE: an add_reference() that occurred when we did 3743 * the arc_read() will have kicked this off the list. 3744 * If it was a prefetch, we will explicitly move it to 3745 * the head of the list now. 3746 */ 3747 if ((HDR_PREFETCH(hdr)) != 0) { 3748 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3749 /* link protected by hash_lock */ 3750 ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 3751 } 3752 ARCSTAT_BUMP(arcstat_mfu_hits); 3753 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 3754 } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { 3755 arc_state_t *new_state = arc_mfu; 3756 /* 3757 * This buffer has been accessed more than once but has 3758 * been evicted from the cache. Move it back to the 3759 * MFU state. 3760 */ 3761 3762 if (HDR_PREFETCH(hdr)) { 3763 /* 3764 * This is a prefetch access... 3765 * move this block back to the MRU state. 3766 */ 3767 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 3768 new_state = arc_mru; 3769 } 3770 3771 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 3772 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 3773 arc_change_state(new_state, hdr, hash_lock); 3774 3775 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 3776 } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { 3777 /* 3778 * This buffer is on the 2nd Level ARC. 
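 * Only its L2ARC copy and header metadata survive; no data is
 * resident in main memory. The header is moved to the MFU state
 * below.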
3779 */ 3780 3781 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 3782 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 3783 arc_change_state(arc_mfu, hdr, hash_lock); 3784 } else { 3785 ASSERT(!"invalid arc state"); 3786 } 3787 } 3788 3789 /* a generic arc_done_func_t which you can use */ 3790 /* ARGSUSED */ 3791 void 3792 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 3793 { 3794 if (zio == NULL || zio->io_error == 0) 3795 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 3796 VERIFY(arc_buf_remove_ref(buf, arg)); 3797 } 3798 3799 /* a generic arc_done_func_t */ 3800 void 3801 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 3802 { 3803 arc_buf_t **bufp = arg; 3804 if (zio && zio->io_error) { 3805 VERIFY(arc_buf_remove_ref(buf, arg)); 3806 *bufp = NULL; 3807 } else { 3808 *bufp = buf; 3809 ASSERT(buf->b_data); 3810 } 3811 } 3812 3813 static void 3814 arc_read_done(zio_t *zio) 3815 { 3816 arc_buf_hdr_t *hdr; 3817 arc_buf_t *buf; 3818 arc_buf_t *abuf; /* buffer we're assigning to callback */ 3819 kmutex_t *hash_lock = NULL; 3820 arc_callback_t *callback_list, *acb; 3821 int freeable = FALSE; 3822 3823 buf = zio->io_private; 3824 hdr = buf->b_hdr; 3825 3826 /* 3827 * The hdr was inserted into hash-table and removed from lists 3828 * prior to starting I/O. We should find this header, since 3829 * it's in the hash table, and it should be legit since it's 3830 * not possible to evict it during the I/O. The only possible 3831 * reason for it not to be found is if we were freed during the 3832 * read. 3833 */ 3834 if (HDR_IN_HASH_TABLE(hdr)) { 3835 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); 3836 ASSERT3U(hdr->b_dva.dva_word[0], ==, 3837 BP_IDENTITY(zio->io_bp)->dva_word[0]); 3838 ASSERT3U(hdr->b_dva.dva_word[1], ==, 3839 BP_IDENTITY(zio->io_bp)->dva_word[1]); 3840 3841 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp, 3842 &hash_lock); 3843 3844 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && 3845 hash_lock == NULL) || 3846 (found == hdr && 3847 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 3848 (found == hdr && HDR_L2_READING(hdr))); 3849 } 3850 3851 hdr->b_flags &= ~ARC_FLAG_L2_EVICTED; 3852 if (l2arc_noprefetch && HDR_PREFETCH(hdr)) 3853 hdr->b_flags &= ~ARC_FLAG_L2CACHE; 3854 3855 /* byteswap if necessary */ 3856 callback_list = hdr->b_l1hdr.b_acb; 3857 ASSERT(callback_list != NULL); 3858 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 3859 dmu_object_byteswap_t bswap = 3860 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 3861 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 3862 byteswap_uint64_array : 3863 dmu_ot_byteswap[bswap].ob_func; 3864 func(buf->b_data, hdr->b_size); 3865 } 3866 3867 arc_cksum_compute(buf, B_FALSE); 3868 arc_buf_watch(buf); 3869 3870 if (hash_lock && zio->io_error == 0 && 3871 hdr->b_l1hdr.b_state == arc_anon) { 3872 /* 3873 * Only call arc_access on anonymous buffers. This is because 3874 * if we've issued an I/O for an evicted buffer, we've already 3875 * called arc_access (to prevent any simultaneous readers from 3876 * getting confused). 
3877 */ 3878 arc_access(hdr, hash_lock); 3879 } 3880 3881 /* create copies of the data buffer for the callers */ 3882 abuf = buf; 3883 for (acb = callback_list; acb; acb = acb->acb_next) { 3884 if (acb->acb_done) { 3885 if (abuf == NULL) { 3886 ARCSTAT_BUMP(arcstat_duplicate_reads); 3887 abuf = arc_buf_clone(buf); 3888 } 3889 acb->acb_buf = abuf; 3890 abuf = NULL; 3891 } 3892 } 3893 hdr->b_l1hdr.b_acb = NULL; 3894 hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; 3895 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 3896 if (abuf == buf) { 3897 ASSERT(buf->b_efunc == NULL); 3898 ASSERT(hdr->b_l1hdr.b_datacnt == 1); 3899 hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 3900 } 3901 3902 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || 3903 callback_list != NULL); 3904 3905 if (zio->io_error != 0) { 3906 hdr->b_flags |= ARC_FLAG_IO_ERROR; 3907 if (hdr->b_l1hdr.b_state != arc_anon) 3908 arc_change_state(arc_anon, hdr, hash_lock); 3909 if (HDR_IN_HASH_TABLE(hdr)) 3910 buf_hash_remove(hdr); 3911 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 3912 } 3913 3914 /* 3915 * Broadcast before we drop the hash_lock to avoid the possibility 3916 * that the hdr (and hence the cv) might be freed before we get to 3917 * the cv_broadcast(). 3918 */ 3919 cv_broadcast(&hdr->b_l1hdr.b_cv); 3920 3921 if (hash_lock != NULL) { 3922 mutex_exit(hash_lock); 3923 } else { 3924 /* 3925 * This block was freed while we waited for the read to 3926 * complete. It has been removed from the hash table and 3927 * moved to the anonymous state (so that it won't show up 3928 * in the cache). 3929 */ 3930 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 3931 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 3932 } 3933 3934 /* execute each callback and free its structure */ 3935 while ((acb = callback_list) != NULL) { 3936 if (acb->acb_done) 3937 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 3938 3939 if (acb->acb_zio_dummy != NULL) { 3940 acb->acb_zio_dummy->io_error = zio->io_error; 3941 zio_nowait(acb->acb_zio_dummy); 3942 } 3943 3944 callback_list = acb->acb_next; 3945 kmem_free(acb, sizeof (arc_callback_t)); 3946 } 3947 3948 if (freeable) 3949 arc_hdr_destroy(hdr); 3950 } 3951 3952 /* 3953 * "Read" the block at the specified DVA (in bp) via the 3954 * cache. If the block is found in the cache, invoke the provided 3955 * callback immediately and return. Note that the `zio' parameter 3956 * in the callback will be NULL in this case, since no IO was 3957 * required. If the block is not in the cache pass the read request 3958 * on to the spa with a substitute callback function, so that the 3959 * requested block will be added to the cache. 3960 * 3961 * If a read request arrives for a block that has a read in-progress, 3962 * either wait for the in-progress read to complete (and return the 3963 * results); or, if this is a read with a "done" func, add a record 3964 * to the read to invoke the "done" func when the read completes, 3965 * and return; or just return. 3966 * 3967 * arc_read_done() will invoke all the requested "done" functions 3968 * for readers of this block. 
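 *
 * A minimal synchronous call sketch (the caller-side variable names
 * here are illustrative only, not part of this interface):
 *
 *     arc_flags_t aflags = ARC_FLAG_WAIT;
 *     arc_buf_t *abuf = NULL;
 *     int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *         ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * On success the generic arc_getbuf_func() callback above fills in
 * abuf; the caller eventually drops its hold with
 * arc_buf_remove_ref().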
3969 */ 3970 int 3971 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 3972 void *private, zio_priority_t priority, int zio_flags, 3973 arc_flags_t *arc_flags, const zbookmark_phys_t *zb) 3974 { 3975 arc_buf_hdr_t *hdr = NULL; 3976 arc_buf_t *buf = NULL; 3977 kmutex_t *hash_lock = NULL; 3978 zio_t *rzio; 3979 uint64_t guid = spa_load_guid(spa); 3980 3981 ASSERT(!BP_IS_EMBEDDED(bp) || 3982 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 3983 3984 top: 3985 if (!BP_IS_EMBEDDED(bp)) { 3986 /* 3987 * Embedded BP's have no DVA to uniquely identify them in the 3988 * cache, so skip the lookup; they always take the miss path. 3989 */ 3990 hdr = buf_hash_find(guid, bp, &hash_lock); 3991 } 3992 3993 if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_datacnt > 0) { 3994 3995 *arc_flags |= ARC_FLAG_CACHED; 3996 3997 if (HDR_IO_IN_PROGRESS(hdr)) { 3998 3999 if (*arc_flags & ARC_FLAG_WAIT) { 4000 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); 4001 mutex_exit(hash_lock); 4002 goto top; 4003 } 4004 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 4005 4006 if (done) { 4007 arc_callback_t *acb = NULL; 4008 4009 acb = kmem_zalloc(sizeof (arc_callback_t), 4010 KM_SLEEP); 4011 acb->acb_done = done; 4012 acb->acb_private = private; 4013 if (pio != NULL) 4014 acb->acb_zio_dummy = zio_null(pio, 4015 spa, NULL, NULL, NULL, zio_flags); 4016 4017 ASSERT(acb->acb_done != NULL); 4018 acb->acb_next = hdr->b_l1hdr.b_acb; 4019 hdr->b_l1hdr.b_acb = acb; 4020 add_reference(hdr, hash_lock, private); 4021 mutex_exit(hash_lock); 4022 return (0); 4023 } 4024 mutex_exit(hash_lock); 4025 return (0); 4026 } 4027 4028 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 4029 hdr->b_l1hdr.b_state == arc_mfu); 4030 4031 if (done) { 4032 add_reference(hdr, hash_lock, private); 4033 /* 4034 * If this block is already in use, create a new 4035 * copy of the data so that we will be guaranteed 4036 * that arc_release() will always succeed.
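 * If the buffer is still flagged ARC_FLAG_BUF_AVAILABLE we hand it
 * out directly (clearing the flag below); otherwise the existing
 * buf is already in use and arc_buf_clone() gives this reader its
 * own copy.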
4037 */ 4038 buf = hdr->b_l1hdr.b_buf; 4039 ASSERT(buf); 4040 ASSERT(buf->b_data); 4041 if (HDR_BUF_AVAILABLE(hdr)) { 4042 ASSERT(buf->b_efunc == NULL); 4043 hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; 4044 } else { 4045 buf = arc_buf_clone(buf); 4046 } 4047 4048 } else if (*arc_flags & ARC_FLAG_PREFETCH && 4049 refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 4050 hdr->b_flags |= ARC_FLAG_PREFETCH; 4051 } 4052 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 4053 arc_access(hdr, hash_lock); 4054 if (*arc_flags & ARC_FLAG_L2CACHE) 4055 hdr->b_flags |= ARC_FLAG_L2CACHE; 4056 if (*arc_flags & ARC_FLAG_L2COMPRESS) 4057 hdr->b_flags |= ARC_FLAG_L2COMPRESS; 4058 mutex_exit(hash_lock); 4059 ARCSTAT_BUMP(arcstat_hits); 4060 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 4061 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 4062 data, metadata, hits); 4063 4064 if (done) 4065 done(NULL, buf, private); 4066 } else { 4067 uint64_t size = BP_GET_LSIZE(bp); 4068 arc_callback_t *acb; 4069 vdev_t *vd = NULL; 4070 uint64_t addr = 0; 4071 boolean_t devw = B_FALSE; 4072 enum zio_compress b_compress = ZIO_COMPRESS_OFF; 4073 int32_t b_asize = 0; 4074 4075 if (hdr == NULL) { 4076 /* this block is not in the cache */ 4077 arc_buf_hdr_t *exists = NULL; 4078 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 4079 buf = arc_buf_alloc(spa, size, private, type); 4080 hdr = buf->b_hdr; 4081 if (!BP_IS_EMBEDDED(bp)) { 4082 hdr->b_dva = *BP_IDENTITY(bp); 4083 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 4084 exists = buf_hash_insert(hdr, &hash_lock); 4085 } 4086 if (exists != NULL) { 4087 /* somebody beat us to the hash insert */ 4088 mutex_exit(hash_lock); 4089 buf_discard_identity(hdr); 4090 (void) arc_buf_remove_ref(buf, private); 4091 goto top; /* restart the IO request */ 4092 } 4093 4094 /* if this is a prefetch, we don't have a reference */ 4095 if (*arc_flags & ARC_FLAG_PREFETCH) { 4096 (void) remove_reference(hdr, hash_lock, 4097 private); 4098 hdr->b_flags |= ARC_FLAG_PREFETCH; 4099 } 4100 if (*arc_flags & ARC_FLAG_L2CACHE) 4101 hdr->b_flags |= ARC_FLAG_L2CACHE; 4102 if (*arc_flags & ARC_FLAG_L2COMPRESS) 4103 hdr->b_flags |= ARC_FLAG_L2COMPRESS; 4104 if (BP_GET_LEVEL(bp) > 0) 4105 hdr->b_flags |= ARC_FLAG_INDIRECT; 4106 } else { 4107 /* 4108 * This block is in the ghost cache. If it was L2-only 4109 * (and thus didn't have an L1 hdr), we realloc the 4110 * header to add an L1 hdr. 
4111 */ 4112 if (!HDR_HAS_L1HDR(hdr)) { 4113 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, 4114 hdr_full_cache); 4115 } 4116 4117 ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state)); 4118 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 4119 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4120 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 4121 4122 /* if this is a prefetch, we don't have a reference */ 4123 if (*arc_flags & ARC_FLAG_PREFETCH) 4124 hdr->b_flags |= ARC_FLAG_PREFETCH; 4125 else 4126 add_reference(hdr, hash_lock, private); 4127 if (*arc_flags & ARC_FLAG_L2CACHE) 4128 hdr->b_flags |= ARC_FLAG_L2CACHE; 4129 if (*arc_flags & ARC_FLAG_L2COMPRESS) 4130 hdr->b_flags |= ARC_FLAG_L2COMPRESS; 4131 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 4132 buf->b_hdr = hdr; 4133 buf->b_data = NULL; 4134 buf->b_efunc = NULL; 4135 buf->b_private = NULL; 4136 buf->b_next = NULL; 4137 hdr->b_l1hdr.b_buf = buf; 4138 ASSERT0(hdr->b_l1hdr.b_datacnt); 4139 hdr->b_l1hdr.b_datacnt = 1; 4140 arc_get_data_buf(buf); 4141 arc_access(hdr, hash_lock); 4142 } 4143 4144 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); 4145 4146 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 4147 acb->acb_done = done; 4148 acb->acb_private = private; 4149 4150 ASSERT(hdr->b_l1hdr.b_acb == NULL); 4151 hdr->b_l1hdr.b_acb = acb; 4152 hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS; 4153 4154 if (HDR_HAS_L2HDR(hdr) && 4155 (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { 4156 devw = hdr->b_l2hdr.b_dev->l2ad_writing; 4157 addr = hdr->b_l2hdr.b_daddr; 4158 b_compress = hdr->b_l2hdr.b_compress; 4159 b_asize = hdr->b_l2hdr.b_asize; 4160 /* 4161 * Lock out device removal. 4162 */ 4163 if (vdev_is_dead(vd) || 4164 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 4165 vd = NULL; 4166 } 4167 4168 if (hash_lock != NULL) 4169 mutex_exit(hash_lock); 4170 4171 /* 4172 * At this point, we have a level 1 cache miss. Try again in 4173 * L2ARC if possible. 4174 */ 4175 ASSERT3U(hdr->b_size, ==, size); 4176 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 4177 uint64_t, size, zbookmark_phys_t *, zb); 4178 ARCSTAT_BUMP(arcstat_misses); 4179 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 4180 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 4181 data, metadata, misses); 4182 4183 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 4184 /* 4185 * Read from the L2ARC if the following are true: 4186 * 1. The L2ARC vdev was previously cached. 4187 * 2. This buffer still has L2ARC metadata. 4188 * 3. This buffer isn't currently writing to the L2ARC. 4189 * 4. The L2ARC entry wasn't evicted, which may 4190 * also have invalidated the vdev. 4191 * 5. This isn't prefetch and l2arc_noprefetch is set. 4192 */ 4193 if (HDR_HAS_L2HDR(hdr) && 4194 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 4195 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 4196 l2arc_read_callback_t *cb; 4197 4198 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 4199 ARCSTAT_BUMP(arcstat_l2_hits); 4200 4201 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 4202 KM_SLEEP); 4203 cb->l2rcb_buf = buf; 4204 cb->l2rcb_spa = spa; 4205 cb->l2rcb_bp = *bp; 4206 cb->l2rcb_zb = *zb; 4207 cb->l2rcb_flags = zio_flags; 4208 cb->l2rcb_compress = b_compress; 4209 4210 ASSERT(addr >= VDEV_LABEL_START_SIZE && 4211 addr + size < vd->vdev_psize - 4212 VDEV_LABEL_END_SIZE); 4213 4214 /* 4215 * l2arc read. The SCL_L2ARC lock will be 4216 * released by l2arc_read_done(). 4217 * Issue a null zio if the underlying buffer 4218 * was squashed to zero size by compression. 
4219 */ 4220 if (b_compress == ZIO_COMPRESS_EMPTY) { 4221 rzio = zio_null(pio, spa, vd, 4222 l2arc_read_done, cb, 4223 zio_flags | ZIO_FLAG_DONT_CACHE | 4224 ZIO_FLAG_CANFAIL | 4225 ZIO_FLAG_DONT_PROPAGATE | 4226 ZIO_FLAG_DONT_RETRY); 4227 } else { 4228 rzio = zio_read_phys(pio, vd, addr, 4229 b_asize, buf->b_data, 4230 ZIO_CHECKSUM_OFF, 4231 l2arc_read_done, cb, priority, 4232 zio_flags | ZIO_FLAG_DONT_CACHE | 4233 ZIO_FLAG_CANFAIL | 4234 ZIO_FLAG_DONT_PROPAGATE | 4235 ZIO_FLAG_DONT_RETRY, B_FALSE); 4236 } 4237 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 4238 zio_t *, rzio); 4239 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize); 4240 4241 if (*arc_flags & ARC_FLAG_NOWAIT) { 4242 zio_nowait(rzio); 4243 return (0); 4244 } 4245 4246 ASSERT(*arc_flags & ARC_FLAG_WAIT); 4247 if (zio_wait(rzio) == 0) 4248 return (0); 4249 4250 /* l2arc read error; goto zio_read() */ 4251 } else { 4252 DTRACE_PROBE1(l2arc__miss, 4253 arc_buf_hdr_t *, hdr); 4254 ARCSTAT_BUMP(arcstat_l2_misses); 4255 if (HDR_L2_WRITING(hdr)) 4256 ARCSTAT_BUMP(arcstat_l2_rw_clash); 4257 spa_config_exit(spa, SCL_L2ARC, vd); 4258 } 4259 } else { 4260 if (vd != NULL) 4261 spa_config_exit(spa, SCL_L2ARC, vd); 4262 if (l2arc_ndev != 0) { 4263 DTRACE_PROBE1(l2arc__miss, 4264 arc_buf_hdr_t *, hdr); 4265 ARCSTAT_BUMP(arcstat_l2_misses); 4266 } 4267 } 4268 4269 rzio = zio_read(pio, spa, bp, buf->b_data, size, 4270 arc_read_done, buf, priority, zio_flags, zb); 4271 4272 if (*arc_flags & ARC_FLAG_WAIT) 4273 return (zio_wait(rzio)); 4274 4275 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 4276 zio_nowait(rzio); 4277 } 4278 return (0); 4279 } 4280 4281 void 4282 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 4283 { 4284 ASSERT(buf->b_hdr != NULL); 4285 ASSERT(buf->b_hdr->b_l1hdr.b_state != arc_anon); 4286 ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt) || 4287 func == NULL); 4288 ASSERT(buf->b_efunc == NULL); 4289 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 4290 4291 buf->b_efunc = func; 4292 buf->b_private = private; 4293 } 4294 4295 /* 4296 * Notify the arc that a block was freed, and thus will never be used again. 4297 */ 4298 void 4299 arc_freed(spa_t *spa, const blkptr_t *bp) 4300 { 4301 arc_buf_hdr_t *hdr; 4302 kmutex_t *hash_lock; 4303 uint64_t guid = spa_load_guid(spa); 4304 4305 ASSERT(!BP_IS_EMBEDDED(bp)); 4306 4307 hdr = buf_hash_find(guid, bp, &hash_lock); 4308 if (hdr == NULL) 4309 return; 4310 if (HDR_BUF_AVAILABLE(hdr)) { 4311 arc_buf_t *buf = hdr->b_l1hdr.b_buf; 4312 add_reference(hdr, hash_lock, FTAG); 4313 hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; 4314 mutex_exit(hash_lock); 4315 4316 arc_release(buf, FTAG); 4317 (void) arc_buf_remove_ref(buf, FTAG); 4318 } else { 4319 mutex_exit(hash_lock); 4320 } 4321 4322 } 4323 4324 /* 4325 * Clear the user eviction callback set by arc_set_callback(), first calling 4326 * it if it exists. Because the presence of a callback keeps an arc_buf cached 4327 * clearing the callback may result in the arc_buf being destroyed. However, 4328 * it will not result in the *last* arc_buf being destroyed, hence the data 4329 * will remain cached in the ARC. We make a copy of the arc buffer here so 4330 * that we can process the callback without holding any locks. 4331 * 4332 * It's possible that the callback is already in the process of being cleared 4333 * by another thread. In this case we can not clear the callback. 4334 * 4335 * Returns B_TRUE if the callback was successfully called and cleared. 
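 *
 * (A B_FALSE return means the callback could not be cleared here,
 * typically because the buffer is already queued for
 * arc_do_user_evicts(), which will invoke the callback itself.)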
4336 */ 4337 boolean_t 4338 arc_clear_callback(arc_buf_t *buf) 4339 { 4340 arc_buf_hdr_t *hdr; 4341 kmutex_t *hash_lock; 4342 arc_evict_func_t *efunc = buf->b_efunc; 4343 void *private = buf->b_private; 4344 4345 mutex_enter(&buf->b_evict_lock); 4346 hdr = buf->b_hdr; 4347 if (hdr == NULL) { 4348 /* 4349 * We are in arc_do_user_evicts(). 4350 */ 4351 ASSERT(buf->b_data == NULL); 4352 mutex_exit(&buf->b_evict_lock); 4353 return (B_FALSE); 4354 } else if (buf->b_data == NULL) { 4355 /* 4356 * We are on the eviction list; process this buffer now 4357 * but let arc_do_user_evicts() do the reaping. 4358 */ 4359 buf->b_efunc = NULL; 4360 mutex_exit(&buf->b_evict_lock); 4361 VERIFY0(efunc(private)); 4362 return (B_TRUE); 4363 } 4364 hash_lock = HDR_LOCK(hdr); 4365 mutex_enter(hash_lock); 4366 hdr = buf->b_hdr; 4367 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4368 4369 ASSERT3U(refcount_count(&hdr->b_l1hdr.b_refcnt), <, 4370 hdr->b_l1hdr.b_datacnt); 4371 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 4372 hdr->b_l1hdr.b_state == arc_mfu); 4373 4374 buf->b_efunc = NULL; 4375 buf->b_private = NULL; 4376 4377 if (hdr->b_l1hdr.b_datacnt > 1) { 4378 mutex_exit(&buf->b_evict_lock); 4379 arc_buf_destroy(buf, TRUE); 4380 } else { 4381 ASSERT(buf == hdr->b_l1hdr.b_buf); 4382 hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; 4383 mutex_exit(&buf->b_evict_lock); 4384 } 4385 4386 mutex_exit(hash_lock); 4387 VERIFY0(efunc(private)); 4388 return (B_TRUE); 4389 } 4390 4391 /* 4392 * Release this buffer from the cache, making it an anonymous buffer. This 4393 * must be done after a read and prior to modifying the buffer contents. 4394 * If the buffer has more than one reference, we must make 4395 * a new hdr for the buffer. 4396 */ 4397 void 4398 arc_release(arc_buf_t *buf, void *tag) 4399 { 4400 arc_buf_hdr_t *hdr = buf->b_hdr; 4401 4402 /* 4403 * It would be nice to assert that if it's DMU metadata (level > 4404 * 0 || it's the dnode file), then it must be syncing context. 4405 * But we don't know that information at this level. 4406 */ 4407 4408 mutex_enter(&buf->b_evict_lock); 4409 4410 ASSERT(HDR_HAS_L1HDR(hdr)); 4411 4412 /* 4413 * We don't grab the hash lock prior to this check, because if 4414 * the buffer's header is in the arc_anon state, it won't be 4415 * linked into the hash table. 4416 */ 4417 if (hdr->b_l1hdr.b_state == arc_anon) { 4418 mutex_exit(&buf->b_evict_lock); 4419 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 4420 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 4421 ASSERT(!HDR_HAS_L2HDR(hdr)); 4422 ASSERT(BUF_EMPTY(hdr)); 4423 4424 ASSERT3U(hdr->b_l1hdr.b_datacnt, ==, 1); 4425 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1); 4426 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node)); 4427 4428 ASSERT3P(buf->b_efunc, ==, NULL); 4429 ASSERT3P(buf->b_private, ==, NULL); 4430 4431 hdr->b_l1hdr.b_arc_access = 0; 4432 arc_buf_thaw(buf); 4433 4434 return; 4435 } 4436 4437 kmutex_t *hash_lock = HDR_LOCK(hdr); 4438 mutex_enter(hash_lock); 4439 4440 /* 4441 * This assignment is only valid as long as the hash_lock is 4442 * held, we must be careful not to reference state or the 4443 * b_state field after dropping the lock. 
4444 */ 4445 arc_state_t *state = hdr->b_l1hdr.b_state; 4446 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4447 ASSERT3P(state, !=, arc_anon); 4448 4449 /* this buffer is not on any list */ 4450 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) > 0); 4451 4452 if (HDR_HAS_L2HDR(hdr)) { 4453 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); 4454 4455 /* 4456 * We have to recheck this conditional again now that 4457 * we're holding the l2ad_mtx to prevent a race with 4458 * another thread which might be concurrently calling 4459 * l2arc_evict(). In that case, l2arc_evict() might have 4460 * destroyed the header's L2 portion as we were waiting 4461 * to acquire the l2ad_mtx. 4462 */ 4463 if (HDR_HAS_L2HDR(hdr)) 4464 arc_hdr_l2hdr_destroy(hdr); 4465 4466 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); 4467 } 4468 4469 /* 4470 * Do we have more than one buf? 4471 */ 4472 if (hdr->b_l1hdr.b_datacnt > 1) { 4473 arc_buf_hdr_t *nhdr; 4474 arc_buf_t **bufp; 4475 uint64_t blksz = hdr->b_size; 4476 uint64_t spa = hdr->b_spa; 4477 arc_buf_contents_t type = arc_buf_type(hdr); 4478 uint32_t flags = hdr->b_flags; 4479 4480 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); 4481 /* 4482 * Pull the data off of this hdr and attach it to 4483 * a new anonymous hdr. 4484 */ 4485 (void) remove_reference(hdr, hash_lock, tag); 4486 bufp = &hdr->b_l1hdr.b_buf; 4487 while (*bufp != buf) 4488 bufp = &(*bufp)->b_next; 4489 *bufp = buf->b_next; 4490 buf->b_next = NULL; 4491 4492 ASSERT3P(state, !=, arc_l2c_only); 4493 4494 (void) refcount_remove_many( 4495 &state->arcs_size, hdr->b_size, buf); 4496 4497 if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { 4498 ASSERT3P(state, !=, arc_l2c_only); 4499 uint64_t *size = &state->arcs_lsize[type]; 4500 ASSERT3U(*size, >=, hdr->b_size); 4501 atomic_add_64(size, -hdr->b_size); 4502 } 4503 4504 /* 4505 * We're releasing a duplicate user data buffer, update 4506 * our statistics accordingly. 
4507 */ 4508 if (HDR_ISTYPE_DATA(hdr)) { 4509 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 4510 ARCSTAT_INCR(arcstat_duplicate_buffers_size, 4511 -hdr->b_size); 4512 } 4513 hdr->b_l1hdr.b_datacnt -= 1; 4514 arc_cksum_verify(buf); 4515 arc_buf_unwatch(buf); 4516 4517 mutex_exit(hash_lock); 4518 4519 nhdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); 4520 nhdr->b_size = blksz; 4521 nhdr->b_spa = spa; 4522 4523 nhdr->b_flags = flags & ARC_FLAG_L2_WRITING; 4524 nhdr->b_flags |= arc_bufc_to_flags(type); 4525 nhdr->b_flags |= ARC_FLAG_HAS_L1HDR; 4526 4527 nhdr->b_l1hdr.b_buf = buf; 4528 nhdr->b_l1hdr.b_datacnt = 1; 4529 nhdr->b_l1hdr.b_state = arc_anon; 4530 nhdr->b_l1hdr.b_arc_access = 0; 4531 nhdr->b_l1hdr.b_tmp_cdata = NULL; 4532 nhdr->b_freeze_cksum = NULL; 4533 4534 (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); 4535 buf->b_hdr = nhdr; 4536 mutex_exit(&buf->b_evict_lock); 4537 (void) refcount_add_many(&arc_anon->arcs_size, blksz, buf); 4538 } else { 4539 mutex_exit(&buf->b_evict_lock); 4540 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); 4541 /* protected by hash lock, or hdr is on arc_anon */ 4542 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 4543 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 4544 arc_change_state(arc_anon, hdr, hash_lock); 4545 hdr->b_l1hdr.b_arc_access = 0; 4546 mutex_exit(hash_lock); 4547 4548 buf_discard_identity(hdr); 4549 arc_buf_thaw(buf); 4550 } 4551 buf->b_efunc = NULL; 4552 buf->b_private = NULL; 4553 } 4554 4555 int 4556 arc_released(arc_buf_t *buf) 4557 { 4558 int released; 4559 4560 mutex_enter(&buf->b_evict_lock); 4561 released = (buf->b_data != NULL && 4562 buf->b_hdr->b_l1hdr.b_state == arc_anon); 4563 mutex_exit(&buf->b_evict_lock); 4564 return (released); 4565 } 4566 4567 #ifdef ZFS_DEBUG 4568 int 4569 arc_referenced(arc_buf_t *buf) 4570 { 4571 int referenced; 4572 4573 mutex_enter(&buf->b_evict_lock); 4574 referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt)); 4575 mutex_exit(&buf->b_evict_lock); 4576 return (referenced); 4577 } 4578 #endif 4579 4580 static void 4581 arc_write_ready(zio_t *zio) 4582 { 4583 arc_write_callback_t *callback = zio->io_private; 4584 arc_buf_t *buf = callback->awcb_buf; 4585 arc_buf_hdr_t *hdr = buf->b_hdr; 4586 4587 ASSERT(HDR_HAS_L1HDR(hdr)); 4588 ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); 4589 ASSERT(hdr->b_l1hdr.b_datacnt > 0); 4590 callback->awcb_ready(zio, buf, callback->awcb_private); 4591 4592 /* 4593 * If the IO is already in progress, then this is a re-write 4594 * attempt, so we need to thaw and re-compute the cksum. 4595 * It is the responsibility of the callback to handle the 4596 * accounting for any re-write attempt. 4597 */ 4598 if (HDR_IO_IN_PROGRESS(hdr)) { 4599 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 4600 if (hdr->b_freeze_cksum != NULL) { 4601 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 4602 hdr->b_freeze_cksum = NULL; 4603 } 4604 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 4605 } 4606 arc_cksum_compute(buf, B_FALSE); 4607 hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS; 4608 } 4609 4610 /* 4611 * The SPA calls this callback for each physical write that happens on behalf 4612 * of a logical write. See the comment in dbuf_write_physdone() for details. 
4613 */ 4614 static void 4615 arc_write_physdone(zio_t *zio) 4616 { 4617 arc_write_callback_t *cb = zio->io_private; 4618 if (cb->awcb_physdone != NULL) 4619 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 4620 } 4621 4622 static void 4623 arc_write_done(zio_t *zio) 4624 { 4625 arc_write_callback_t *callback = zio->io_private; 4626 arc_buf_t *buf = callback->awcb_buf; 4627 arc_buf_hdr_t *hdr = buf->b_hdr; 4628 4629 ASSERT(hdr->b_l1hdr.b_acb == NULL); 4630 4631 if (zio->io_error == 0) { 4632 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 4633 buf_discard_identity(hdr); 4634 } else { 4635 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 4636 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 4637 } 4638 } else { 4639 ASSERT(BUF_EMPTY(hdr)); 4640 } 4641 4642 /* 4643 * If the block to be written was all-zero or compressed enough to be 4644 * embedded in the BP, no write was performed so there will be no 4645 * dva/birth/checksum. The buffer must therefore remain anonymous 4646 * (and uncached). 4647 */ 4648 if (!BUF_EMPTY(hdr)) { 4649 arc_buf_hdr_t *exists; 4650 kmutex_t *hash_lock; 4651 4652 ASSERT(zio->io_error == 0); 4653 4654 arc_cksum_verify(buf); 4655 4656 exists = buf_hash_insert(hdr, &hash_lock); 4657 if (exists != NULL) { 4658 /* 4659 * This can only happen if we overwrite for 4660 * sync-to-convergence, because we remove 4661 * buffers from the hash table when we arc_free(). 4662 */ 4663 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 4664 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 4665 panic("bad overwrite, hdr=%p exists=%p", 4666 (void *)hdr, (void *)exists); 4667 ASSERT(refcount_is_zero( 4668 &exists->b_l1hdr.b_refcnt)); 4669 arc_change_state(arc_anon, exists, hash_lock); 4670 mutex_exit(hash_lock); 4671 arc_hdr_destroy(exists); 4672 exists = buf_hash_insert(hdr, &hash_lock); 4673 ASSERT3P(exists, ==, NULL); 4674 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 4675 /* nopwrite */ 4676 ASSERT(zio->io_prop.zp_nopwrite); 4677 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 4678 panic("bad nopwrite, hdr=%p exists=%p", 4679 (void *)hdr, (void *)exists); 4680 } else { 4681 /* Dedup */ 4682 ASSERT(hdr->b_l1hdr.b_datacnt == 1); 4683 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 4684 ASSERT(BP_GET_DEDUP(zio->io_bp)); 4685 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 4686 } 4687 } 4688 hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; 4689 /* if it's not anon, we are doing a scrub */ 4690 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) 4691 arc_access(hdr, hash_lock); 4692 mutex_exit(hash_lock); 4693 } else { 4694 hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; 4695 } 4696 4697 ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4698 callback->awcb_done(zio, buf, callback->awcb_private); 4699 4700 kmem_free(callback, sizeof (arc_write_callback_t)); 4701 } 4702 4703 zio_t * 4704 arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 4705 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress, 4706 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone, 4707 arc_done_func_t *done, void *private, zio_priority_t priority, 4708 int zio_flags, const zbookmark_phys_t *zb) 4709 { 4710 arc_buf_hdr_t *hdr = buf->b_hdr; 4711 arc_write_callback_t *callback; 4712 zio_t *zio; 4713 4714 ASSERT(ready != NULL); 4715 ASSERT(done != NULL); 4716 ASSERT(!HDR_IO_ERROR(hdr)); 4717 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 4718 ASSERT(hdr->b_l1hdr.b_acb == NULL); 4719 ASSERT(hdr->b_l1hdr.b_datacnt > 0); 4720 if (l2arc) 4721 hdr->b_flags |= ARC_FLAG_L2CACHE; 4722 if (l2arc_compress) 4723 hdr->b_flags |= 
ARC_FLAG_L2COMPRESS; 4724 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 4725 callback->awcb_ready = ready; 4726 callback->awcb_physdone = physdone; 4727 callback->awcb_done = done; 4728 callback->awcb_private = private; 4729 callback->awcb_buf = buf; 4730 4731 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 4732 arc_write_ready, arc_write_physdone, arc_write_done, callback, 4733 priority, zio_flags, zb); 4734 4735 return (zio); 4736 } 4737 4738 static int 4739 arc_memory_throttle(uint64_t reserve, uint64_t txg) 4740 { 4741 #ifdef _KERNEL 4742 uint64_t available_memory = ptob(freemem); 4743 static uint64_t page_load = 0; 4744 static uint64_t last_txg = 0; 4745 4746 #if defined(__i386) 4747 available_memory = 4748 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 4749 #endif 4750 4751 if (freemem > physmem * arc_lotsfree_percent / 100) 4752 return (0); 4753 4754 if (txg > last_txg) { 4755 last_txg = txg; 4756 page_load = 0; 4757 } 4758 /* 4759 * If we are in pageout, we know that memory is already tight, 4760 * the arc is already going to be evicting, so we just want to 4761 * continue to let page writes occur as quickly as possible. 4762 */ 4763 if (curproc == proc_pageout) { 4764 if (page_load > MAX(ptob(minfree), available_memory) / 4) 4765 return (SET_ERROR(ERESTART)); 4766 /* Note: reserve is inflated, so we deflate */ 4767 page_load += reserve / 8; 4768 return (0); 4769 } else if (page_load > 0 && arc_reclaim_needed()) { 4770 /* memory is low, delay before restarting */ 4771 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 4772 return (SET_ERROR(EAGAIN)); 4773 } 4774 page_load = 0; 4775 #endif 4776 return (0); 4777 } 4778 4779 void 4780 arc_tempreserve_clear(uint64_t reserve) 4781 { 4782 atomic_add_64(&arc_tempreserve, -reserve); 4783 ASSERT((int64_t)arc_tempreserve >= 0); 4784 } 4785 4786 int 4787 arc_tempreserve_space(uint64_t reserve, uint64_t txg) 4788 { 4789 int error; 4790 uint64_t anon_size; 4791 4792 if (reserve > arc_c/4 && !arc_no_grow) 4793 arc_c = MIN(arc_c_max, reserve * 4); 4794 if (reserve > arc_c) 4795 return (SET_ERROR(ENOMEM)); 4796 4797 /* 4798 * Don't count loaned bufs as in flight dirty data to prevent long 4799 * network delays from blocking transactions that are ready to be 4800 * assigned to a txg. 4801 */ 4802 anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) - 4803 arc_loaned_bytes), 0); 4804 4805 /* 4806 * Writes will, almost always, require additional memory allocations 4807 * in order to compress/encrypt/etc the data. We therefore need to 4808 * make sure that there is sufficient available memory for this. 4809 */ 4810 error = arc_memory_throttle(reserve, txg); 4811 if (error != 0) 4812 return (error); 4813 4814 /* 4815 * Throttle writes when the amount of dirty data in the cache 4816 * gets too large. We try to keep the cache less than half full 4817 * of dirty blocks so that our sync times don't grow too large. 4818 * Note: if two requests come in concurrently, we might let them 4819 * both succeed, when one of them should fail. Not a huge deal. 
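 *
 * As a worked example (sizes are illustrative only): with arc_c at
 * 8 GB, a reservation fails with ERESTART once anonymous data
 * exceeds 2 GB (arc_c / 4) and reserve + arc_tempreserve +
 * anon_size together cross 4 GB (arc_c / 2), per the check below.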
4820 */ 4821 4822 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 4823 anon_size > arc_c / 4) { 4824 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 4825 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 4826 arc_tempreserve>>10, 4827 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 4828 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 4829 reserve>>10, arc_c>>10); 4830 return (SET_ERROR(ERESTART)); 4831 } 4832 atomic_add_64(&arc_tempreserve, reserve); 4833 return (0); 4834 } 4835 4836 static void 4837 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size, 4838 kstat_named_t *evict_data, kstat_named_t *evict_metadata) 4839 { 4840 size->value.ui64 = refcount_count(&state->arcs_size); 4841 evict_data->value.ui64 = state->arcs_lsize[ARC_BUFC_DATA]; 4842 evict_metadata->value.ui64 = state->arcs_lsize[ARC_BUFC_METADATA]; 4843 } 4844 4845 static int 4846 arc_kstat_update(kstat_t *ksp, int rw) 4847 { 4848 arc_stats_t *as = ksp->ks_data; 4849 4850 if (rw == KSTAT_WRITE) { 4851 return (EACCES); 4852 } else { 4853 arc_kstat_update_state(arc_anon, 4854 &as->arcstat_anon_size, 4855 &as->arcstat_anon_evictable_data, 4856 &as->arcstat_anon_evictable_metadata); 4857 arc_kstat_update_state(arc_mru, 4858 &as->arcstat_mru_size, 4859 &as->arcstat_mru_evictable_data, 4860 &as->arcstat_mru_evictable_metadata); 4861 arc_kstat_update_state(arc_mru_ghost, 4862 &as->arcstat_mru_ghost_size, 4863 &as->arcstat_mru_ghost_evictable_data, 4864 &as->arcstat_mru_ghost_evictable_metadata); 4865 arc_kstat_update_state(arc_mfu, 4866 &as->arcstat_mfu_size, 4867 &as->arcstat_mfu_evictable_data, 4868 &as->arcstat_mfu_evictable_metadata); 4869 arc_kstat_update_state(arc_mfu_ghost, 4870 &as->arcstat_mfu_ghost_size, 4871 &as->arcstat_mfu_ghost_evictable_data, 4872 &as->arcstat_mfu_ghost_evictable_metadata); 4873 } 4874 4875 return (0); 4876 } 4877 4878 /* 4879 * This function *must* return indices evenly distributed between all 4880 * sublists of the multilist. This is needed due to how the ARC eviction 4881 * code is laid out; arc_evict_state() assumes ARC buffers are evenly 4882 * distributed between all sublists and uses this assumption when 4883 * deciding which sublist to evict from and how much to evict from it. 4884 */ 4885 unsigned int 4886 arc_state_multilist_index_func(multilist_t *ml, void *obj) 4887 { 4888 arc_buf_hdr_t *hdr = obj; 4889 4890 /* 4891 * We rely on b_dva to generate evenly distributed index 4892 * numbers using buf_hash below. So, as an added precaution, 4893 * let's make sure we never add empty buffers to the arc lists. 4894 */ 4895 ASSERT(!BUF_EMPTY(hdr)); 4896 4897 /* 4898 * The assumption here is that the hash value for a given 4899 * arc_buf_hdr_t will remain constant throughout its lifetime 4900 * (i.e. its b_spa, b_dva, and b_birth fields don't change). 4901 * Thus, we don't need to store the header's sublist index 4902 * on insertion, as this index can be recalculated on removal. 4903 * 4904 * Also, the low order bits of the hash value are thought to be 4905 * distributed evenly. Otherwise, in the case that the multilist 4906 * has a power of two number of sublists, each sublist's usage 4907 * would not be evenly distributed. 4908 */ 4909 return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % 4910 multilist_get_num_sublists(ml)); 4911 } 4912 4913 void 4914 arc_init(void) 4915 { 4916 /* 4917 * allmem is "all memory that we could possibly use".
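 * In the kernel this is physical memory minus the pages reserved
 * for swapfs (swapfs_minfree); in a userland (non-_KERNEL) build we
 * conservatively assume only half of physical memory is usable.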
4918 */ 4919 #ifdef _KERNEL 4920 uint64_t allmem = ptob(physmem - swapfs_minfree); 4921 #else 4922 uint64_t allmem = (physmem * PAGESIZE) / 2; 4923 #endif 4924 4925 mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL); 4926 cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL); 4927 cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL); 4928 4929 mutex_init(&arc_user_evicts_lock, NULL, MUTEX_DEFAULT, NULL); 4930 cv_init(&arc_user_evicts_cv, NULL, CV_DEFAULT, NULL); 4931 4932 /* Convert seconds to clock ticks */ 4933 arc_min_prefetch_lifespan = 1 * hz; 4934 4935 /* Start out with 1/8 of all memory */ 4936 arc_c = allmem / 8; 4937 4938 #ifdef _KERNEL 4939 /* 4940 * On architectures where the physical memory can be larger 4941 * than the addressable space (intel in 32-bit mode), we may 4942 * need to limit the cache to 1/8 of VM size. 4943 */ 4944 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 4945 #endif 4946 4947 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 4948 arc_c_min = MAX(allmem / 32, 64 << 20); 4949 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 4950 if (allmem >= 1 << 30) 4951 arc_c_max = allmem - (1 << 30); 4952 else 4953 arc_c_max = arc_c_min; 4954 arc_c_max = MAX(allmem * 3 / 4, arc_c_max); 4955 4956 /* 4957 * Allow the tunables to override our calculations if they are 4958 * reasonable (ie. over 64MB) 4959 */ 4960 if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem) 4961 arc_c_max = zfs_arc_max; 4962 if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max) 4963 arc_c_min = zfs_arc_min; 4964 4965 arc_c = arc_c_max; 4966 arc_p = (arc_c >> 1); 4967 4968 /* limit meta-data to 1/4 of the arc capacity */ 4969 arc_meta_limit = arc_c_max / 4; 4970 4971 /* Allow the tunable to override if it is reasonable */ 4972 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 4973 arc_meta_limit = zfs_arc_meta_limit; 4974 4975 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 4976 arc_c_min = arc_meta_limit / 2; 4977 4978 if (zfs_arc_meta_min > 0) { 4979 arc_meta_min = zfs_arc_meta_min; 4980 } else { 4981 arc_meta_min = arc_c_min / 2; 4982 } 4983 4984 if (zfs_arc_grow_retry > 0) 4985 arc_grow_retry = zfs_arc_grow_retry; 4986 4987 if (zfs_arc_shrink_shift > 0) 4988 arc_shrink_shift = zfs_arc_shrink_shift; 4989 4990 /* 4991 * Ensure that arc_no_grow_shift is less than arc_shrink_shift. 
4992 */ 4993 if (arc_no_grow_shift >= arc_shrink_shift) 4994 arc_no_grow_shift = arc_shrink_shift - 1; 4995 4996 if (zfs_arc_p_min_shift > 0) 4997 arc_p_min_shift = zfs_arc_p_min_shift; 4998 4999 if (zfs_arc_num_sublists_per_state < 1) 5000 zfs_arc_num_sublists_per_state = MAX(boot_ncpus, 1); 5001 5002 /* if kmem_flags are set, lets try to use less memory */ 5003 if (kmem_debugging()) 5004 arc_c = arc_c / 2; 5005 if (arc_c < arc_c_min) 5006 arc_c = arc_c_min; 5007 5008 arc_anon = &ARC_anon; 5009 arc_mru = &ARC_mru; 5010 arc_mru_ghost = &ARC_mru_ghost; 5011 arc_mfu = &ARC_mfu; 5012 arc_mfu_ghost = &ARC_mfu_ghost; 5013 arc_l2c_only = &ARC_l2c_only; 5014 arc_size = 0; 5015 5016 multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 5017 sizeof (arc_buf_hdr_t), 5018 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5019 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5020 multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 5021 sizeof (arc_buf_hdr_t), 5022 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5023 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5024 multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 5025 sizeof (arc_buf_hdr_t), 5026 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5027 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5028 multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 5029 sizeof (arc_buf_hdr_t), 5030 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5031 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5032 multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 5033 sizeof (arc_buf_hdr_t), 5034 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5035 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5036 multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 5037 sizeof (arc_buf_hdr_t), 5038 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5039 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5040 multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 5041 sizeof (arc_buf_hdr_t), 5042 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5043 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5044 multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 5045 sizeof (arc_buf_hdr_t), 5046 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5047 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5048 multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 5049 sizeof (arc_buf_hdr_t), 5050 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5051 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5052 multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 5053 sizeof (arc_buf_hdr_t), 5054 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 5055 zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); 5056 5057 refcount_create(&arc_anon->arcs_size); 5058 refcount_create(&arc_mru->arcs_size); 5059 refcount_create(&arc_mru_ghost->arcs_size); 5060 refcount_create(&arc_mfu->arcs_size); 5061 refcount_create(&arc_mfu_ghost->arcs_size); 5062 refcount_create(&arc_l2c_only->arcs_size); 5063 5064 buf_init(); 5065 5066 arc_reclaim_thread_exit = FALSE; 5067 arc_user_evicts_thread_exit = FALSE; 5068 arc_eviction_list = NULL; 5069 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 5070 5071 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 5072 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 5073 5074 if (arc_ksp != NULL) { 5075 arc_ksp->ks_data = &arc_stats; 5076 arc_ksp->ks_update = arc_kstat_update; 5077 
kstat_install(arc_ksp); 5078 } 5079 5080 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 5081 TS_RUN, minclsyspri); 5082 5083 (void) thread_create(NULL, 0, arc_user_evicts_thread, NULL, 0, &p0, 5084 TS_RUN, minclsyspri); 5085 5086 arc_dead = FALSE; 5087 arc_warm = B_FALSE; 5088 5089 /* 5090 * Calculate maximum amount of dirty data per pool. 5091 * 5092 * If it has been set by /etc/system, take that. 5093 * Otherwise, use a percentage of physical memory defined by 5094 * zfs_dirty_data_max_percent (default 10%) with a cap at 5095 * zfs_dirty_data_max_max (default 4GB). 5096 */ 5097 if (zfs_dirty_data_max == 0) { 5098 zfs_dirty_data_max = physmem * PAGESIZE * 5099 zfs_dirty_data_max_percent / 100; 5100 zfs_dirty_data_max = MIN(zfs_dirty_data_max, 5101 zfs_dirty_data_max_max); 5102 } 5103 } 5104 5105 void 5106 arc_fini(void) 5107 { 5108 mutex_enter(&arc_reclaim_lock); 5109 arc_reclaim_thread_exit = TRUE; 5110 /* 5111 * The reclaim thread will set arc_reclaim_thread_exit back to 5112 * FALSE when it is finished exiting; we're waiting for that. 5113 */ 5114 while (arc_reclaim_thread_exit) { 5115 cv_signal(&arc_reclaim_thread_cv); 5116 cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock); 5117 } 5118 mutex_exit(&arc_reclaim_lock); 5119 5120 mutex_enter(&arc_user_evicts_lock); 5121 arc_user_evicts_thread_exit = TRUE; 5122 /* 5123 * The user evicts thread will set arc_user_evicts_thread_exit 5124 * to FALSE when it is finished exiting; we're waiting for that. 5125 */ 5126 while (arc_user_evicts_thread_exit) { 5127 cv_signal(&arc_user_evicts_cv); 5128 cv_wait(&arc_user_evicts_cv, &arc_user_evicts_lock); 5129 } 5130 mutex_exit(&arc_user_evicts_lock); 5131 5132 /* Use TRUE to ensure *all* buffers are evicted */ 5133 arc_flush(NULL, TRUE); 5134 5135 arc_dead = TRUE; 5136 5137 if (arc_ksp != NULL) { 5138 kstat_delete(arc_ksp); 5139 arc_ksp = NULL; 5140 } 5141 5142 mutex_destroy(&arc_reclaim_lock); 5143 cv_destroy(&arc_reclaim_thread_cv); 5144 cv_destroy(&arc_reclaim_waiters_cv); 5145 5146 mutex_destroy(&arc_user_evicts_lock); 5147 cv_destroy(&arc_user_evicts_cv); 5148 5149 refcount_destroy(&arc_anon->arcs_size); 5150 refcount_destroy(&arc_mru->arcs_size); 5151 refcount_destroy(&arc_mru_ghost->arcs_size); 5152 refcount_destroy(&arc_mfu->arcs_size); 5153 refcount_destroy(&arc_mfu_ghost->arcs_size); 5154 refcount_destroy(&arc_l2c_only->arcs_size); 5155 5156 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 5157 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 5158 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 5159 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 5160 multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 5161 multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 5162 multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 5163 multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 5164 5165 buf_fini(); 5166 5167 ASSERT0(arc_loaned_bytes); 5168 } 5169 5170 /* 5171 * Level 2 ARC 5172 * 5173 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 5174 * It uses dedicated storage devices to hold cached data, which are populated 5175 * using large infrequent writes. The main role of this cache is to boost 5176 * the performance of random read workloads. The intended L2ARC devices 5177 * include short-stroked disks, solid state disks, and other media with 5178 * substantially faster read latency than disk. 
5179 * 5180 * +-----------------------+ 5181 * | ARC | 5182 * +-----------------------+ 5183 * | ^ ^ 5184 * | | | 5185 * l2arc_feed_thread() arc_read() 5186 * | | | 5187 * | l2arc read | 5188 * V | | 5189 * +---------------+ | 5190 * | L2ARC | | 5191 * +---------------+ | 5192 * | ^ | 5193 * l2arc_write() | | 5194 * | | | 5195 * V | | 5196 * +-------+ +-------+ 5197 * | vdev | | vdev | 5198 * | cache | | cache | 5199 * +-------+ +-------+ 5200 * +=========+ .-----. 5201 * : L2ARC : |-_____-| 5202 * : devices : | Disks | 5203 * +=========+ `-_____-' 5204 * 5205 * Read requests are satisfied from the following sources, in order: 5206 * 5207 * 1) ARC 5208 * 2) vdev cache of L2ARC devices 5209 * 3) L2ARC devices 5210 * 4) vdev cache of disks 5211 * 5) disks 5212 * 5213 * Some L2ARC device types exhibit extremely slow write performance. 5214 * To accommodate for this there are some significant differences between 5215 * the L2ARC and traditional cache design: 5216 * 5217 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 5218 * the ARC behave as usual, freeing buffers and placing headers on ghost 5219 * lists. The ARC does not send buffers to the L2ARC during eviction as 5220 * this would add inflated write latencies for all ARC memory pressure. 5221 * 5222 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 5223 * It does this by periodically scanning buffers from the eviction-end of 5224 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 5225 * not already there. It scans until a headroom of buffers is satisfied, 5226 * which itself is a buffer for ARC eviction. If a compressible buffer is 5227 * found during scanning and selected for writing to an L2ARC device, we 5228 * temporarily boost scanning headroom during the next scan cycle to make 5229 * sure we adapt to compression effects (which might significantly reduce 5230 * the data volume we write to L2ARC). The thread that does this is 5231 * l2arc_feed_thread(), illustrated below; example sizes are included to 5232 * provide a better sense of ratio than this diagram: 5233 * 5234 * head --> tail 5235 * +---------------------+----------+ 5236 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 5237 * +---------------------+----------+ | o L2ARC eligible 5238 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 5239 * +---------------------+----------+ | 5240 * 15.9 Gbytes ^ 32 Mbytes | 5241 * headroom | 5242 * l2arc_feed_thread() 5243 * | 5244 * l2arc write hand <--[oooo]--' 5245 * | 8 Mbyte 5246 * | write max 5247 * V 5248 * +==============================+ 5249 * L2ARC dev |####|#|###|###| |####| ... | 5250 * +==============================+ 5251 * 32 Gbytes 5252 * 5253 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 5254 * evicted, then the L2ARC has cached a buffer much sooner than it probably 5255 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 5256 * safe to say that this is an uncommon case, since buffers at the end of 5257 * the ARC lists have moved there due to inactivity. 5258 * 5259 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 5260 * then the L2ARC simply misses copying some buffers. This serves as a 5261 * pressure valve to prevent heavy read workloads from both stalling the ARC 5262 * with waits and clogging the L2ARC with writes. 
This also helps prevent 5263 * the potential for the L2ARC to churn if it attempts to cache content too 5264 * quickly, such as during backups of the entire pool. 5265 * 5266 * 5. After system boot and before the ARC has filled main memory, there are 5267 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 5268 * lists can remain mostly static. Instead of searching from tail of these 5269 * lists as pictured, the l2arc_feed_thread() will search from the list heads 5270 * for eligible buffers, greatly increasing its chance of finding them. 5271 * 5272 * The L2ARC device write speed is also boosted during this time so that 5273 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 5274 * there are no L2ARC reads, and no fear of degrading read performance 5275 * through increased writes. 5276 * 5277 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 5278 * the vdev queue can aggregate them into larger and fewer writes. Each 5279 * device is written to in a rotor fashion, sweeping writes through 5280 * available space then repeating. 5281 * 5282 * 7. The L2ARC does not store dirty content. It never needs to flush 5283 * write buffers back to disk based storage. 5284 * 5285 * 8. If an ARC buffer is written (and dirtied) which also exists in the 5286 * L2ARC, the now stale L2ARC buffer is immediately dropped. 5287 * 5288 * The performance of the L2ARC can be tweaked by a number of tunables, which 5289 * may be necessary for different workloads: 5290 * 5291 * l2arc_write_max max write bytes per interval 5292 * l2arc_write_boost extra write bytes during device warmup 5293 * l2arc_noprefetch skip caching prefetched buffers 5294 * l2arc_headroom number of max device writes to precache 5295 * l2arc_headroom_boost when we find compressed buffers during ARC 5296 * scanning, we multiply headroom by this 5297 * percentage factor for the next scan cycle, 5298 * since more compressed buffers are likely to 5299 * be present 5300 * l2arc_feed_secs seconds between L2ARC writing 5301 * 5302 * Tunables may be removed or added as future performance improvements are 5303 * integrated, and also may become zpool properties. 5304 * 5305 * There are three key functions that control how the L2ARC warms up: 5306 * 5307 * l2arc_write_eligible() check if a buffer is eligible to cache 5308 * l2arc_write_size() calculate how much to write 5309 * l2arc_write_interval() calculate sleep delay between writes 5310 * 5311 * These three functions determine what to write, how much, and how quickly 5312 * to send writes. 5313 */ 5314 5315 static boolean_t 5316 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr) 5317 { 5318 /* 5319 * A buffer is *not* eligible for the L2ARC if it: 5320 * 1. belongs to a different spa. 5321 * 2. is already cached on the L2ARC. 5322 * 3. has an I/O in progress (it may be an incomplete read). 5323 * 4. is flagged not eligible (zfs property). 5324 */ 5325 if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) || 5326 HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr)) 5327 return (B_FALSE); 5328 5329 return (B_TRUE); 5330 } 5331 5332 static uint64_t 5333 l2arc_write_size(void) 5334 { 5335 uint64_t size; 5336 5337 /* 5338 * Make sure our globals have meaningful values in case the user 5339 * altered them. 
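	 * On illumos these globals are typically altered via /etc/system;
	 * an illustrative (non-default) setting of a 64MB write max would
	 * look like:
	 *
	 *	set zfs:l2arc_write_max = 67108864
	 *
	 * The one value we cannot honor is zero, which is reset to the
	 * default below.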
5340 */ 5341 size = l2arc_write_max; 5342 if (size == 0) { 5343 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " 5344 "be greater than zero, resetting it to the default (%d)", 5345 L2ARC_WRITE_SIZE); 5346 size = l2arc_write_max = L2ARC_WRITE_SIZE; 5347 } 5348 5349 if (arc_warm == B_FALSE) 5350 size += l2arc_write_boost; 5351 5352 return (size); 5353 5354 } 5355 5356 static clock_t 5357 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 5358 { 5359 clock_t interval, next, now; 5360 5361 /* 5362 * If the ARC lists are busy, increase our write rate; if the 5363 * lists are stale, idle back. This is achieved by checking 5364 * how much we previously wrote - if it was more than half of 5365 * what we wanted, schedule the next write much sooner. 5366 */ 5367 if (l2arc_feed_again && wrote > (wanted / 2)) 5368 interval = (hz * l2arc_feed_min_ms) / 1000; 5369 else 5370 interval = hz * l2arc_feed_secs; 5371 5372 now = ddi_get_lbolt(); 5373 next = MAX(now, MIN(now + interval, began + interval)); 5374 5375 return (next); 5376 } 5377 5378 /* 5379 * Cycle through L2ARC devices. This is how L2ARC load balances. 5380 * If a device is returned, this also returns holding the spa config lock. 5381 */ 5382 static l2arc_dev_t * 5383 l2arc_dev_get_next(void) 5384 { 5385 l2arc_dev_t *first, *next = NULL; 5386 5387 /* 5388 * Lock out the removal of spas (spa_namespace_lock), then removal 5389 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 5390 * both locks will be dropped and a spa config lock held instead. 5391 */ 5392 mutex_enter(&spa_namespace_lock); 5393 mutex_enter(&l2arc_dev_mtx); 5394 5395 /* if there are no vdevs, there is nothing to do */ 5396 if (l2arc_ndev == 0) 5397 goto out; 5398 5399 first = NULL; 5400 next = l2arc_dev_last; 5401 do { 5402 /* loop around the list looking for a non-faulted vdev */ 5403 if (next == NULL) { 5404 next = list_head(l2arc_dev_list); 5405 } else { 5406 next = list_next(l2arc_dev_list, next); 5407 if (next == NULL) 5408 next = list_head(l2arc_dev_list); 5409 } 5410 5411 /* if we have come back to the start, bail out */ 5412 if (first == NULL) 5413 first = next; 5414 else if (next == first) 5415 break; 5416 5417 } while (vdev_is_dead(next->l2ad_vdev)); 5418 5419 /* if we were unable to find any usable vdevs, return NULL */ 5420 if (vdev_is_dead(next->l2ad_vdev)) 5421 next = NULL; 5422 5423 l2arc_dev_last = next; 5424 5425 out: 5426 mutex_exit(&l2arc_dev_mtx); 5427 5428 /* 5429 * Grab the config lock to prevent the 'next' device from being 5430 * removed while we are writing to it. 5431 */ 5432 if (next != NULL) 5433 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 5434 mutex_exit(&spa_namespace_lock); 5435 5436 return (next); 5437 } 5438 5439 /* 5440 * Free buffers that were tagged for destruction. 5441 */ 5442 static void 5443 l2arc_do_free_on_write() 5444 { 5445 list_t *buflist; 5446 l2arc_data_free_t *df, *df_prev; 5447 5448 mutex_enter(&l2arc_free_on_write_mtx); 5449 buflist = l2arc_free_on_write; 5450 5451 for (df = list_tail(buflist); df; df = df_prev) { 5452 df_prev = list_prev(buflist, df); 5453 ASSERT(df->l2df_data != NULL); 5454 ASSERT(df->l2df_func != NULL); 5455 df->l2df_func(df->l2df_data, df->l2df_size); 5456 list_remove(buflist, df); 5457 kmem_free(df, sizeof (l2arc_data_free_t)); 5458 } 5459 5460 mutex_exit(&l2arc_free_on_write_mtx); 5461 } 5462 5463 /* 5464 * A write to a cache device has completed. Update all headers to allow 5465 * reads from these buffers to begin. 
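 *
 * This is the completion callback of the root zio built in
 * l2arc_write_buffers(): zio->io_private carries the
 * l2arc_write_callback_t, which supplies the target device and the dummy
 * "write head" header marking where this batch of buffers begins on the
 * device's buflist.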
5466 */ 5467 static void 5468 l2arc_write_done(zio_t *zio) 5469 { 5470 l2arc_write_callback_t *cb; 5471 l2arc_dev_t *dev; 5472 list_t *buflist; 5473 arc_buf_hdr_t *head, *hdr, *hdr_prev; 5474 kmutex_t *hash_lock; 5475 int64_t bytes_dropped = 0; 5476 5477 cb = zio->io_private; 5478 ASSERT(cb != NULL); 5479 dev = cb->l2wcb_dev; 5480 ASSERT(dev != NULL); 5481 head = cb->l2wcb_head; 5482 ASSERT(head != NULL); 5483 buflist = &dev->l2ad_buflist; 5484 ASSERT(buflist != NULL); 5485 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 5486 l2arc_write_callback_t *, cb); 5487 5488 if (zio->io_error != 0) 5489 ARCSTAT_BUMP(arcstat_l2_writes_error); 5490 5491 /* 5492 * All writes completed, or an error was hit. 5493 */ 5494 top: 5495 mutex_enter(&dev->l2ad_mtx); 5496 for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) { 5497 hdr_prev = list_prev(buflist, hdr); 5498 5499 hash_lock = HDR_LOCK(hdr); 5500 5501 /* 5502 * We cannot use mutex_enter or else we can deadlock 5503 * with l2arc_write_buffers (due to swapping the order 5504 * the hash lock and l2ad_mtx are taken). 5505 */ 5506 if (!mutex_tryenter(hash_lock)) { 5507 /* 5508 * Missed the hash lock. We must retry so we 5509 * don't leave the ARC_FLAG_L2_WRITING bit set. 5510 */ 5511 ARCSTAT_BUMP(arcstat_l2_writes_lock_retry); 5512 5513 /* 5514 * We don't want to rescan the headers we've 5515 * already marked as having been written out, so 5516 * we reinsert the head node so we can pick up 5517 * where we left off. 5518 */ 5519 list_remove(buflist, head); 5520 list_insert_after(buflist, hdr, head); 5521 5522 mutex_exit(&dev->l2ad_mtx); 5523 5524 /* 5525 * We wait for the hash lock to become available 5526 * to try and prevent busy waiting, and increase 5527 * the chance we'll be able to acquire the lock 5528 * the next time around. 5529 */ 5530 mutex_enter(hash_lock); 5531 mutex_exit(hash_lock); 5532 goto top; 5533 } 5534 5535 /* 5536 * We could not have been moved into the arc_l2c_only 5537 * state while in-flight due to our ARC_FLAG_L2_WRITING 5538 * bit being set. Let's just ensure that's being enforced. 5539 */ 5540 ASSERT(HDR_HAS_L1HDR(hdr)); 5541 5542 /* 5543 * We may have allocated a buffer for L2ARC compression, 5544 * we must release it to avoid leaking this data. 5545 */ 5546 l2arc_release_cdata_buf(hdr); 5547 5548 if (zio->io_error != 0) { 5549 /* 5550 * Error - drop L2ARC entry. 5551 */ 5552 list_remove(buflist, hdr); 5553 hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR; 5554 5555 ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize); 5556 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 5557 5558 bytes_dropped += hdr->b_l2hdr.b_asize; 5559 (void) refcount_remove_many(&dev->l2ad_alloc, 5560 hdr->b_l2hdr.b_asize, hdr); 5561 } 5562 5563 /* 5564 * Allow ARC to begin reads and ghost list evictions to 5565 * this L2ARC entry. 5566 */ 5567 hdr->b_flags &= ~ARC_FLAG_L2_WRITING; 5568 5569 mutex_exit(hash_lock); 5570 } 5571 5572 atomic_inc_64(&l2arc_writes_done); 5573 list_remove(buflist, head); 5574 ASSERT(!HDR_HAS_L1HDR(head)); 5575 kmem_cache_free(hdr_l2only_cache, head); 5576 mutex_exit(&dev->l2ad_mtx); 5577 5578 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); 5579 5580 l2arc_do_free_on_write(); 5581 5582 kmem_free(cb, sizeof (l2arc_write_callback_t)); 5583 } 5584 5585 /* 5586 * A read to a cache device completed. Validate buffer contents before 5587 * handing over to the regular ARC routines. 
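 *
 * "Validate" means three checks, condensed here from the body of this
 * function: the checksum computed when the buffer was written out must
 * still match, the zio must have completed without error, and the header
 * must not have been marked L2_EVICTED while the read was in flight:
 *
 *	if (arc_cksum_equal(buf) && zio->io_error == 0 &&
 *	    !HDR_L2_EVICTED(hdr))
 *		arc_read_done(zio);	(hand the buffer to the ARC)
 *	else
 *		(reissue the read against the original storage device)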
5588 */ 5589 static void 5590 l2arc_read_done(zio_t *zio) 5591 { 5592 l2arc_read_callback_t *cb; 5593 arc_buf_hdr_t *hdr; 5594 arc_buf_t *buf; 5595 kmutex_t *hash_lock; 5596 int equal; 5597 5598 ASSERT(zio->io_vd != NULL); 5599 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 5600 5601 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 5602 5603 cb = zio->io_private; 5604 ASSERT(cb != NULL); 5605 buf = cb->l2rcb_buf; 5606 ASSERT(buf != NULL); 5607 5608 hash_lock = HDR_LOCK(buf->b_hdr); 5609 mutex_enter(hash_lock); 5610 hdr = buf->b_hdr; 5611 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 5612 5613 /* 5614 * If the buffer was compressed, decompress it first. 5615 */ 5616 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF) 5617 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress); 5618 ASSERT(zio->io_data != NULL); 5619 ASSERT3U(zio->io_size, ==, hdr->b_size); 5620 ASSERT3U(BP_GET_LSIZE(&cb->l2rcb_bp), ==, hdr->b_size); 5621 5622 /* 5623 * Check this survived the L2ARC journey. 5624 */ 5625 equal = arc_cksum_equal(buf); 5626 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 5627 mutex_exit(hash_lock); 5628 zio->io_private = buf; 5629 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 5630 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 5631 arc_read_done(zio); 5632 } else { 5633 mutex_exit(hash_lock); 5634 /* 5635 * Buffer didn't survive caching. Increment stats and 5636 * reissue to the original storage device. 5637 */ 5638 if (zio->io_error != 0) { 5639 ARCSTAT_BUMP(arcstat_l2_io_error); 5640 } else { 5641 zio->io_error = SET_ERROR(EIO); 5642 } 5643 if (!equal) 5644 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 5645 5646 /* 5647 * If there's no waiter, issue an async i/o to the primary 5648 * storage now. If there *is* a waiter, the caller must 5649 * issue the i/o in a context where it's OK to block. 5650 */ 5651 if (zio->io_waiter == NULL) { 5652 zio_t *pio = zio_unique_parent(zio); 5653 5654 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 5655 5656 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 5657 buf->b_data, hdr->b_size, arc_read_done, buf, 5658 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 5659 } 5660 } 5661 5662 kmem_free(cb, sizeof (l2arc_read_callback_t)); 5663 } 5664 5665 /* 5666 * This is the list priority from which the L2ARC will search for pages to 5667 * cache. This is used within loops (0..3) to cycle through lists in the 5668 * desired order. This order can have a significant effect on cache 5669 * performance. 5670 * 5671 * Currently the metadata lists are hit first, MFU then MRU, followed by 5672 * the data lists. This function returns a locked list, and also returns 5673 * the lock pointer. 5674 */ 5675 static multilist_sublist_t * 5676 l2arc_sublist_lock(int list_num) 5677 { 5678 multilist_t *ml = NULL; 5679 unsigned int idx; 5680 5681 ASSERT(list_num >= 0 && list_num <= 3); 5682 5683 switch (list_num) { 5684 case 0: 5685 ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 5686 break; 5687 case 1: 5688 ml = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 5689 break; 5690 case 2: 5691 ml = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 5692 break; 5693 case 3: 5694 ml = &arc_mru->arcs_list[ARC_BUFC_DATA]; 5695 break; 5696 } 5697 5698 /* 5699 * Return a randomly-selected sublist. This is acceptable 5700 * because the caller feeds only a little bit of data for each 5701 * call (8MB). Subsequent calls will result in different 5702 * sublists being selected. 
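	 *
	 * A condensed sketch of how l2arc_write_buffers() drives this
	 * interface, with list_num cycling through the priority order
	 * described above:
	 *
	 *	for (int try = 0; try <= 3; try++) {
	 *		multilist_sublist_t *mls = l2arc_sublist_lock(try);
	 *		(scan buffers from the head or tail of mls)
	 *		multilist_sublist_unlock(mls);
	 *	}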
5703 */ 5704 idx = multilist_get_random_index(ml); 5705 return (multilist_sublist_lock(ml, idx)); 5706 } 5707 5708 /* 5709 * Evict buffers from the device write hand to the distance specified in 5710 * bytes. This distance may span populated buffers, it may span nothing. 5711 * This is clearing a region on the L2ARC device ready for writing. 5712 * If the 'all' boolean is set, every buffer is evicted. 5713 */ 5714 static void 5715 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 5716 { 5717 list_t *buflist; 5718 arc_buf_hdr_t *hdr, *hdr_prev; 5719 kmutex_t *hash_lock; 5720 uint64_t taddr; 5721 5722 buflist = &dev->l2ad_buflist; 5723 5724 if (!all && dev->l2ad_first) { 5725 /* 5726 * This is the first sweep through the device. There is 5727 * nothing to evict. 5728 */ 5729 return; 5730 } 5731 5732 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 5733 /* 5734 * When nearing the end of the device, evict to the end 5735 * before the device write hand jumps to the start. 5736 */ 5737 taddr = dev->l2ad_end; 5738 } else { 5739 taddr = dev->l2ad_hand + distance; 5740 } 5741 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 5742 uint64_t, taddr, boolean_t, all); 5743 5744 top: 5745 mutex_enter(&dev->l2ad_mtx); 5746 for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) { 5747 hdr_prev = list_prev(buflist, hdr); 5748 5749 hash_lock = HDR_LOCK(hdr); 5750 5751 /* 5752 * We cannot use mutex_enter or else we can deadlock 5753 * with l2arc_write_buffers (due to swapping the order 5754 * the hash lock and l2ad_mtx are taken). 5755 */ 5756 if (!mutex_tryenter(hash_lock)) { 5757 /* 5758 * Missed the hash lock. Retry. 5759 */ 5760 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 5761 mutex_exit(&dev->l2ad_mtx); 5762 mutex_enter(hash_lock); 5763 mutex_exit(hash_lock); 5764 goto top; 5765 } 5766 5767 if (HDR_L2_WRITE_HEAD(hdr)) { 5768 /* 5769 * We hit a write head node. Leave it for 5770 * l2arc_write_done(). 5771 */ 5772 list_remove(buflist, hdr); 5773 mutex_exit(hash_lock); 5774 continue; 5775 } 5776 5777 if (!all && HDR_HAS_L2HDR(hdr) && 5778 (hdr->b_l2hdr.b_daddr > taddr || 5779 hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) { 5780 /* 5781 * We've evicted to the target address, 5782 * or the end of the device. 5783 */ 5784 mutex_exit(hash_lock); 5785 break; 5786 } 5787 5788 ASSERT(HDR_HAS_L2HDR(hdr)); 5789 if (!HDR_HAS_L1HDR(hdr)) { 5790 ASSERT(!HDR_L2_READING(hdr)); 5791 /* 5792 * This doesn't exist in the ARC. Destroy. 5793 * arc_hdr_destroy() will call list_remove() 5794 * and decrement arcstat_l2_size. 5795 */ 5796 arc_change_state(arc_anon, hdr, hash_lock); 5797 arc_hdr_destroy(hdr); 5798 } else { 5799 ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only); 5800 ARCSTAT_BUMP(arcstat_l2_evict_l1cached); 5801 /* 5802 * Invalidate issued or about to be issued 5803 * reads, since we may be about to write 5804 * over this location. 5805 */ 5806 if (HDR_L2_READING(hdr)) { 5807 ARCSTAT_BUMP(arcstat_l2_evict_reading); 5808 hdr->b_flags |= ARC_FLAG_L2_EVICTED; 5809 } 5810 5811 /* Ensure this header has finished being written */ 5812 ASSERT(!HDR_L2_WRITING(hdr)); 5813 ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL); 5814 5815 arc_hdr_l2hdr_destroy(hdr); 5816 } 5817 mutex_exit(hash_lock); 5818 } 5819 mutex_exit(&dev->l2ad_mtx); 5820 } 5821 5822 /* 5823 * Find and write ARC buffers to the L2ARC device. 5824 * 5825 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid 5826 * for reading until they have completed writing. 
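 *
 * The amount of each list scanned per pass is bounded by a headroom; as a
 * sketch of the calculation done in the body of this function:
 *
 *	headroom = target_sz * l2arc_headroom;
 *	if (do_headroom_boost)
 *		headroom = (headroom * l2arc_headroom_boost) / 100;
 *
 * For example, an 8MB target with a headroom factor of 2 and a 200%
 * boost allows up to 32MB of list to be examined in one pass.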
5827 * The headroom_boost is an in-out parameter used to maintain headroom boost 5828 * state between calls to this function. 5829 * 5830 * Returns the number of bytes actually written (which may be smaller than 5831 * the delta by which the device hand has changed due to alignment). 5832 */ 5833 static uint64_t 5834 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz, 5835 boolean_t *headroom_boost) 5836 { 5837 arc_buf_hdr_t *hdr, *hdr_prev, *head; 5838 uint64_t write_asize, write_psize, write_sz, headroom, 5839 buf_compress_minsz; 5840 void *buf_data; 5841 boolean_t full; 5842 l2arc_write_callback_t *cb; 5843 zio_t *pio, *wzio; 5844 uint64_t guid = spa_load_guid(spa); 5845 const boolean_t do_headroom_boost = *headroom_boost; 5846 5847 ASSERT(dev->l2ad_vdev != NULL); 5848 5849 /* Lower the flag now, we might want to raise it again later. */ 5850 *headroom_boost = B_FALSE; 5851 5852 pio = NULL; 5853 write_sz = write_asize = write_psize = 0; 5854 full = B_FALSE; 5855 head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE); 5856 head->b_flags |= ARC_FLAG_L2_WRITE_HEAD; 5857 head->b_flags |= ARC_FLAG_HAS_L2HDR; 5858 5859 /* 5860 * We will want to try to compress buffers that are at least 2x the 5861 * device sector size. 5862 */ 5863 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift; 5864 5865 /* 5866 * Copy buffers for L2ARC writing. 5867 */ 5868 for (int try = 0; try <= 3; try++) { 5869 multilist_sublist_t *mls = l2arc_sublist_lock(try); 5870 uint64_t passed_sz = 0; 5871 5872 /* 5873 * L2ARC fast warmup. 5874 * 5875 * Until the ARC is warm and starts to evict, read from the 5876 * head of the ARC lists rather than the tail. 5877 */ 5878 if (arc_warm == B_FALSE) 5879 hdr = multilist_sublist_head(mls); 5880 else 5881 hdr = multilist_sublist_tail(mls); 5882 5883 headroom = target_sz * l2arc_headroom; 5884 if (do_headroom_boost) 5885 headroom = (headroom * l2arc_headroom_boost) / 100; 5886 5887 for (; hdr; hdr = hdr_prev) { 5888 kmutex_t *hash_lock; 5889 uint64_t buf_sz; 5890 5891 if (arc_warm == B_FALSE) 5892 hdr_prev = multilist_sublist_next(mls, hdr); 5893 else 5894 hdr_prev = multilist_sublist_prev(mls, hdr); 5895 5896 hash_lock = HDR_LOCK(hdr); 5897 if (!mutex_tryenter(hash_lock)) { 5898 /* 5899 * Skip this buffer rather than waiting. 5900 */ 5901 continue; 5902 } 5903 5904 passed_sz += hdr->b_size; 5905 if (passed_sz > headroom) { 5906 /* 5907 * Searched too far. 5908 */ 5909 mutex_exit(hash_lock); 5910 break; 5911 } 5912 5913 if (!l2arc_write_eligible(guid, hdr)) { 5914 mutex_exit(hash_lock); 5915 continue; 5916 } 5917 5918 if ((write_sz + hdr->b_size) > target_sz) { 5919 full = B_TRUE; 5920 mutex_exit(hash_lock); 5921 break; 5922 } 5923 5924 if (pio == NULL) { 5925 /* 5926 * Insert a dummy header on the buflist so 5927 * l2arc_write_done() can find where the 5928 * write buffers begin without searching. 5929 */ 5930 mutex_enter(&dev->l2ad_mtx); 5931 list_insert_head(&dev->l2ad_buflist, head); 5932 mutex_exit(&dev->l2ad_mtx); 5933 5934 cb = kmem_alloc( 5935 sizeof (l2arc_write_callback_t), KM_SLEEP); 5936 cb->l2wcb_dev = dev; 5937 cb->l2wcb_head = head; 5938 pio = zio_root(spa, l2arc_write_done, cb, 5939 ZIO_FLAG_CANFAIL); 5940 } 5941 5942 /* 5943 * Create and add a new L2ARC header. 5944 */ 5945 hdr->b_l2hdr.b_dev = dev; 5946 hdr->b_flags |= ARC_FLAG_L2_WRITING; 5947 /* 5948 * Temporarily stash the data buffer in b_tmp_cdata. 5949 * The subsequent write step will pick it up from 5950 * there. 
This is because can't access b_l1hdr.b_buf 5951 * without holding the hash_lock, which we in turn 5952 * can't access without holding the ARC list locks 5953 * (which we want to avoid during compression/writing). 5954 */ 5955 hdr->b_l2hdr.b_compress = ZIO_COMPRESS_OFF; 5956 hdr->b_l2hdr.b_asize = hdr->b_size; 5957 hdr->b_l1hdr.b_tmp_cdata = hdr->b_l1hdr.b_buf->b_data; 5958 5959 /* 5960 * Explicitly set the b_daddr field to a known 5961 * value which means "invalid address". This 5962 * enables us to differentiate which stage of 5963 * l2arc_write_buffers() the particular header 5964 * is in (e.g. this loop, or the one below). 5965 * ARC_FLAG_L2_WRITING is not enough to make 5966 * this distinction, and we need to know in 5967 * order to do proper l2arc vdev accounting in 5968 * arc_release() and arc_hdr_destroy(). 5969 * 5970 * Note, we can't use a new flag to distinguish 5971 * the two stages because we don't hold the 5972 * header's hash_lock below, in the second stage 5973 * of this function. Thus, we can't simply 5974 * change the b_flags field to denote that the 5975 * IO has been sent. We can change the b_daddr 5976 * field of the L2 portion, though, since we'll 5977 * be holding the l2ad_mtx; which is why we're 5978 * using it to denote the header's state change. 5979 */ 5980 hdr->b_l2hdr.b_daddr = L2ARC_ADDR_UNSET; 5981 5982 buf_sz = hdr->b_size; 5983 hdr->b_flags |= ARC_FLAG_HAS_L2HDR; 5984 5985 mutex_enter(&dev->l2ad_mtx); 5986 list_insert_head(&dev->l2ad_buflist, hdr); 5987 mutex_exit(&dev->l2ad_mtx); 5988 5989 /* 5990 * Compute and store the buffer cksum before 5991 * writing. On debug the cksum is verified first. 5992 */ 5993 arc_cksum_verify(hdr->b_l1hdr.b_buf); 5994 arc_cksum_compute(hdr->b_l1hdr.b_buf, B_TRUE); 5995 5996 mutex_exit(hash_lock); 5997 5998 write_sz += buf_sz; 5999 } 6000 6001 multilist_sublist_unlock(mls); 6002 6003 if (full == B_TRUE) 6004 break; 6005 } 6006 6007 /* No buffers selected for writing? */ 6008 if (pio == NULL) { 6009 ASSERT0(write_sz); 6010 ASSERT(!HDR_HAS_L1HDR(head)); 6011 kmem_cache_free(hdr_l2only_cache, head); 6012 return (0); 6013 } 6014 6015 mutex_enter(&dev->l2ad_mtx); 6016 6017 /* 6018 * Now start writing the buffers. We're starting at the write head 6019 * and work backwards, retracing the course of the buffer selector 6020 * loop above. 6021 */ 6022 for (hdr = list_prev(&dev->l2ad_buflist, head); hdr; 6023 hdr = list_prev(&dev->l2ad_buflist, hdr)) { 6024 uint64_t buf_sz; 6025 6026 /* 6027 * We rely on the L1 portion of the header below, so 6028 * it's invalid for this header to have been evicted out 6029 * of the ghost cache, prior to being written out. The 6030 * ARC_FLAG_L2_WRITING bit ensures this won't happen. 6031 */ 6032 ASSERT(HDR_HAS_L1HDR(hdr)); 6033 6034 /* 6035 * We shouldn't need to lock the buffer here, since we flagged 6036 * it as ARC_FLAG_L2_WRITING in the previous step, but we must 6037 * take care to only access its L2 cache parameters. In 6038 * particular, hdr->l1hdr.b_buf may be invalid by now due to 6039 * ARC eviction. 6040 */ 6041 hdr->b_l2hdr.b_daddr = dev->l2ad_hand; 6042 6043 if ((HDR_L2COMPRESS(hdr)) && 6044 hdr->b_l2hdr.b_asize >= buf_compress_minsz) { 6045 if (l2arc_compress_buf(hdr)) { 6046 /* 6047 * If compression succeeded, enable headroom 6048 * boost on the next scan cycle. 6049 */ 6050 *headroom_boost = B_TRUE; 6051 } 6052 } 6053 6054 /* 6055 * Pick up the buffer data we had previously stashed away 6056 * (and now potentially also compressed). 
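		 * At this point b_tmp_cdata points either at the original
		 * ARC buffer data (uncompressed case) or at a temporary
		 * buffer allocated by l2arc_compress_buf(), and
		 * b_l2hdr.b_asize has been updated to the size that will
		 * actually be written to the device.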
6057 */ 6058 buf_data = hdr->b_l1hdr.b_tmp_cdata; 6059 buf_sz = hdr->b_l2hdr.b_asize; 6060 6061 /* 6062 * We need to do this regardless if buf_sz is zero or 6063 * not, otherwise, when this l2hdr is evicted we'll 6064 * remove a reference that was never added. 6065 */ 6066 (void) refcount_add_many(&dev->l2ad_alloc, buf_sz, hdr); 6067 6068 /* Compression may have squashed the buffer to zero length. */ 6069 if (buf_sz != 0) { 6070 uint64_t buf_p_sz; 6071 6072 wzio = zio_write_phys(pio, dev->l2ad_vdev, 6073 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 6074 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 6075 ZIO_FLAG_CANFAIL, B_FALSE); 6076 6077 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 6078 zio_t *, wzio); 6079 (void) zio_nowait(wzio); 6080 6081 write_asize += buf_sz; 6082 6083 /* 6084 * Keep the clock hand suitably device-aligned. 6085 */ 6086 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 6087 write_psize += buf_p_sz; 6088 dev->l2ad_hand += buf_p_sz; 6089 } 6090 } 6091 6092 mutex_exit(&dev->l2ad_mtx); 6093 6094 ASSERT3U(write_asize, <=, target_sz); 6095 ARCSTAT_BUMP(arcstat_l2_writes_sent); 6096 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize); 6097 ARCSTAT_INCR(arcstat_l2_size, write_sz); 6098 ARCSTAT_INCR(arcstat_l2_asize, write_asize); 6099 vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0); 6100 6101 /* 6102 * Bump device hand to the device start if it is approaching the end. 6103 * l2arc_evict() will already have evicted ahead for this case. 6104 */ 6105 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 6106 dev->l2ad_hand = dev->l2ad_start; 6107 dev->l2ad_first = B_FALSE; 6108 } 6109 6110 dev->l2ad_writing = B_TRUE; 6111 (void) zio_wait(pio); 6112 dev->l2ad_writing = B_FALSE; 6113 6114 return (write_asize); 6115 } 6116 6117 /* 6118 * Compresses an L2ARC buffer. 6119 * The data to be compressed must be prefilled in l1hdr.b_tmp_cdata and its 6120 * size in l2hdr->b_asize. This routine tries to compress the data and 6121 * depending on the compression result there are three possible outcomes: 6122 * *) The buffer was incompressible. The original l2hdr contents were left 6123 * untouched and are ready for writing to an L2 device. 6124 * *) The buffer was all-zeros, so there is no need to write it to an L2 6125 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is 6126 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY. 6127 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary 6128 * data buffer which holds the compressed data to be written, and b_asize 6129 * tells us how much data there is. b_compress is set to the appropriate 6130 * compression algorithm. Once writing is done, invoke 6131 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer. 6132 * 6133 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the 6134 * buffer was incompressible). 
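 *
 * A caller therefore only needs the return value to decide whether to
 * boost headroom, and b_compress/b_asize to decide what, if anything, to
 * write; condensed from l2arc_write_buffers():
 *
 *	if (HDR_L2COMPRESS(hdr) &&
 *	    hdr->b_l2hdr.b_asize >= buf_compress_minsz) {
 *		if (l2arc_compress_buf(hdr))
 *			*headroom_boost = B_TRUE;
 *	}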
6135 */ 6136 static boolean_t 6137 l2arc_compress_buf(arc_buf_hdr_t *hdr) 6138 { 6139 void *cdata; 6140 size_t csize, len, rounded; 6141 ASSERT(HDR_HAS_L2HDR(hdr)); 6142 l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr; 6143 6144 ASSERT(HDR_HAS_L1HDR(hdr)); 6145 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF); 6146 ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL); 6147 6148 len = l2hdr->b_asize; 6149 cdata = zio_data_buf_alloc(len); 6150 ASSERT3P(cdata, !=, NULL); 6151 csize = zio_compress_data(ZIO_COMPRESS_LZ4, hdr->b_l1hdr.b_tmp_cdata, 6152 cdata, l2hdr->b_asize); 6153 6154 rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE); 6155 if (rounded > csize) { 6156 bzero((char *)cdata + csize, rounded - csize); 6157 csize = rounded; 6158 } 6159 6160 if (csize == 0) { 6161 /* zero block, indicate that there's nothing to write */ 6162 zio_data_buf_free(cdata, len); 6163 l2hdr->b_compress = ZIO_COMPRESS_EMPTY; 6164 l2hdr->b_asize = 0; 6165 hdr->b_l1hdr.b_tmp_cdata = NULL; 6166 ARCSTAT_BUMP(arcstat_l2_compress_zeros); 6167 return (B_TRUE); 6168 } else if (csize > 0 && csize < len) { 6169 /* 6170 * Compression succeeded, we'll keep the cdata around for 6171 * writing and release it afterwards. 6172 */ 6173 l2hdr->b_compress = ZIO_COMPRESS_LZ4; 6174 l2hdr->b_asize = csize; 6175 hdr->b_l1hdr.b_tmp_cdata = cdata; 6176 ARCSTAT_BUMP(arcstat_l2_compress_successes); 6177 return (B_TRUE); 6178 } else { 6179 /* 6180 * Compression failed, release the compressed buffer. 6181 * l2hdr will be left unmodified. 6182 */ 6183 zio_data_buf_free(cdata, len); 6184 ARCSTAT_BUMP(arcstat_l2_compress_failures); 6185 return (B_FALSE); 6186 } 6187 } 6188 6189 /* 6190 * Decompresses a zio read back from an l2arc device. On success, the 6191 * underlying zio's io_data buffer is overwritten by the uncompressed 6192 * version. On decompression error (corrupt compressed stream), the 6193 * zio->io_error value is set to signal an I/O error. 6194 * 6195 * Please note that the compressed data stream is not checksummed, so 6196 * if the underlying device is experiencing data corruption, we may feed 6197 * corrupt data to the decompressor, so the decompressor needs to be 6198 * able to handle this situation (LZ4 does). 6199 */ 6200 static void 6201 l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c) 6202 { 6203 ASSERT(L2ARC_IS_VALID_COMPRESS(c)); 6204 6205 if (zio->io_error != 0) { 6206 /* 6207 * An io error has occured, just restore the original io 6208 * size in preparation for a main pool read. 6209 */ 6210 zio->io_orig_size = zio->io_size = hdr->b_size; 6211 return; 6212 } 6213 6214 if (c == ZIO_COMPRESS_EMPTY) { 6215 /* 6216 * An empty buffer results in a null zio, which means we 6217 * need to fill its io_data after we're done restoring the 6218 * buffer's contents. 6219 */ 6220 ASSERT(hdr->b_l1hdr.b_buf != NULL); 6221 bzero(hdr->b_l1hdr.b_buf->b_data, hdr->b_size); 6222 zio->io_data = zio->io_orig_data = hdr->b_l1hdr.b_buf->b_data; 6223 } else { 6224 ASSERT(zio->io_data != NULL); 6225 /* 6226 * We copy the compressed data from the start of the arc buffer 6227 * (the zio_read will have pulled in only what we need, the 6228 * rest is garbage which we will overwrite at decompression) 6229 * and then decompress back to the ARC data buffer. This way we 6230 * can minimize copying by simply decompressing back over the 6231 * original compressed data (rather than decompressing to an 6232 * aux buffer and then copying back the uncompressed buffer, 6233 * which is likely to be much larger). 
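		 * As a purely illustrative example: for a 128K ARC buffer
		 * whose LZ4 stream occupies the first 40K of io_data, we
		 * allocate a 40K scratch buffer, bcopy the 40K stream into
		 * it, and decompress from the scratch buffer back over the
		 * full 128K io_data buffer.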
6234 */ 6235 uint64_t csize; 6236 void *cdata; 6237 6238 csize = zio->io_size; 6239 cdata = zio_data_buf_alloc(csize); 6240 bcopy(zio->io_data, cdata, csize); 6241 if (zio_decompress_data(c, cdata, zio->io_data, csize, 6242 hdr->b_size) != 0) 6243 zio->io_error = EIO; 6244 zio_data_buf_free(cdata, csize); 6245 } 6246 6247 /* Restore the expected uncompressed IO size. */ 6248 zio->io_orig_size = zio->io_size = hdr->b_size; 6249 } 6250 6251 /* 6252 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure. 6253 * This buffer serves as a temporary holder of compressed data while 6254 * the buffer entry is being written to an l2arc device. Once that is 6255 * done, we can dispose of it. 6256 */ 6257 static void 6258 l2arc_release_cdata_buf(arc_buf_hdr_t *hdr) 6259 { 6260 ASSERT(HDR_HAS_L2HDR(hdr)); 6261 enum zio_compress comp = hdr->b_l2hdr.b_compress; 6262 6263 ASSERT(HDR_HAS_L1HDR(hdr)); 6264 ASSERT(comp == ZIO_COMPRESS_OFF || L2ARC_IS_VALID_COMPRESS(comp)); 6265 6266 if (comp == ZIO_COMPRESS_OFF) { 6267 /* 6268 * In this case, b_tmp_cdata points to the same buffer 6269 * as the arc_buf_t's b_data field. We don't want to 6270 * free it, since the arc_buf_t will handle that. 6271 */ 6272 hdr->b_l1hdr.b_tmp_cdata = NULL; 6273 } else if (comp == ZIO_COMPRESS_EMPTY) { 6274 /* 6275 * In this case, b_tmp_cdata was compressed to an empty 6276 * buffer, thus there's nothing to free and b_tmp_cdata 6277 * should have been set to NULL in l2arc_write_buffers(). 6278 */ 6279 ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL); 6280 } else { 6281 /* 6282 * If the data was compressed, then we've allocated a 6283 * temporary buffer for it, so now we need to release it. 6284 */ 6285 ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL); 6286 zio_data_buf_free(hdr->b_l1hdr.b_tmp_cdata, 6287 hdr->b_size); 6288 hdr->b_l1hdr.b_tmp_cdata = NULL; 6289 } 6290 6291 } 6292 6293 /* 6294 * This thread feeds the L2ARC at regular intervals. This is the beating 6295 * heart of the L2ARC. 6296 */ 6297 static void 6298 l2arc_feed_thread(void) 6299 { 6300 callb_cpr_t cpr; 6301 l2arc_dev_t *dev; 6302 spa_t *spa; 6303 uint64_t size, wrote; 6304 clock_t begin, next = ddi_get_lbolt(); 6305 boolean_t headroom_boost = B_FALSE; 6306 6307 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 6308 6309 mutex_enter(&l2arc_feed_thr_lock); 6310 6311 while (l2arc_thread_exit == 0) { 6312 CALLB_CPR_SAFE_BEGIN(&cpr); 6313 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 6314 next); 6315 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 6316 next = ddi_get_lbolt() + hz; 6317 6318 /* 6319 * Quick check for L2ARC devices. 6320 */ 6321 mutex_enter(&l2arc_dev_mtx); 6322 if (l2arc_ndev == 0) { 6323 mutex_exit(&l2arc_dev_mtx); 6324 continue; 6325 } 6326 mutex_exit(&l2arc_dev_mtx); 6327 begin = ddi_get_lbolt(); 6328 6329 /* 6330 * This selects the next l2arc device to write to, and in 6331 * doing so the next spa to feed from: dev->l2ad_spa. This 6332 * will return NULL if there are now no l2arc devices or if 6333 * they are all faulted. 6334 * 6335 * If a device is returned, its spa's config lock is also 6336 * held to prevent device removal. l2arc_dev_get_next() 6337 * will grab and release l2arc_dev_mtx. 6338 */ 6339 if ((dev = l2arc_dev_get_next()) == NULL) 6340 continue; 6341 6342 spa = dev->l2ad_spa; 6343 ASSERT(spa != NULL); 6344 6345 /* 6346 * If the pool is read-only then force the feed thread to 6347 * sleep a little longer. 
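		 * "A little longer" is five feed intervals; for example,
		 * with l2arc_feed_secs set to one second the thread polls
		 * a read-only pool only about every five seconds.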
6348 */ 6349 if (!spa_writeable(spa)) { 6350 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 6351 spa_config_exit(spa, SCL_L2ARC, dev); 6352 continue; 6353 } 6354 6355 /* 6356 * Avoid contributing to memory pressure. 6357 */ 6358 if (arc_reclaim_needed()) { 6359 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 6360 spa_config_exit(spa, SCL_L2ARC, dev); 6361 continue; 6362 } 6363 6364 ARCSTAT_BUMP(arcstat_l2_feeds); 6365 6366 size = l2arc_write_size(); 6367 6368 /* 6369 * Evict L2ARC buffers that will be overwritten. 6370 */ 6371 l2arc_evict(dev, size, B_FALSE); 6372 6373 /* 6374 * Write ARC buffers. 6375 */ 6376 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost); 6377 6378 /* 6379 * Calculate interval between writes. 6380 */ 6381 next = l2arc_write_interval(begin, size, wrote); 6382 spa_config_exit(spa, SCL_L2ARC, dev); 6383 } 6384 6385 l2arc_thread_exit = 0; 6386 cv_broadcast(&l2arc_feed_thr_cv); 6387 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 6388 thread_exit(); 6389 } 6390 6391 boolean_t 6392 l2arc_vdev_present(vdev_t *vd) 6393 { 6394 l2arc_dev_t *dev; 6395 6396 mutex_enter(&l2arc_dev_mtx); 6397 for (dev = list_head(l2arc_dev_list); dev != NULL; 6398 dev = list_next(l2arc_dev_list, dev)) { 6399 if (dev->l2ad_vdev == vd) 6400 break; 6401 } 6402 mutex_exit(&l2arc_dev_mtx); 6403 6404 return (dev != NULL); 6405 } 6406 6407 /* 6408 * Add a vdev for use by the L2ARC. By this point the spa has already 6409 * validated the vdev and opened it. 6410 */ 6411 void 6412 l2arc_add_vdev(spa_t *spa, vdev_t *vd) 6413 { 6414 l2arc_dev_t *adddev; 6415 6416 ASSERT(!l2arc_vdev_present(vd)); 6417 6418 /* 6419 * Create a new l2arc device entry. 6420 */ 6421 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 6422 adddev->l2ad_spa = spa; 6423 adddev->l2ad_vdev = vd; 6424 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 6425 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 6426 adddev->l2ad_hand = adddev->l2ad_start; 6427 adddev->l2ad_first = B_TRUE; 6428 adddev->l2ad_writing = B_FALSE; 6429 6430 mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL); 6431 /* 6432 * This is a list of all ARC buffers that are still valid on the 6433 * device. 6434 */ 6435 list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 6436 offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node)); 6437 6438 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 6439 refcount_create(&adddev->l2ad_alloc); 6440 6441 /* 6442 * Add device to global list 6443 */ 6444 mutex_enter(&l2arc_dev_mtx); 6445 list_insert_head(l2arc_dev_list, adddev); 6446 atomic_inc_64(&l2arc_ndev); 6447 mutex_exit(&l2arc_dev_mtx); 6448 } 6449 6450 /* 6451 * Remove a vdev from the L2ARC. 6452 */ 6453 void 6454 l2arc_remove_vdev(vdev_t *vd) 6455 { 6456 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 6457 6458 /* 6459 * Find the device by vdev 6460 */ 6461 mutex_enter(&l2arc_dev_mtx); 6462 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 6463 nextdev = list_next(l2arc_dev_list, dev); 6464 if (vd == dev->l2ad_vdev) { 6465 remdev = dev; 6466 break; 6467 } 6468 } 6469 ASSERT(remdev != NULL); 6470 6471 /* 6472 * Remove device from global list 6473 */ 6474 list_remove(l2arc_dev_list, remdev); 6475 l2arc_dev_last = NULL; /* may have been invalidated */ 6476 atomic_dec_64(&l2arc_ndev); 6477 mutex_exit(&l2arc_dev_mtx); 6478 6479 /* 6480 * Clear all buflists and ARC references. L2ARC device flush. 
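	 * Passing a distance of 0 with all == B_TRUE makes l2arc_evict()
	 * walk the entire buflist, dropping the L2 state of every header
	 * (and destroying headers that exist only for the L2ARC), so that
	 * nothing references remdev by the time it is freed below.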
6481 */ 6482 l2arc_evict(remdev, 0, B_TRUE); 6483 list_destroy(&remdev->l2ad_buflist); 6484 mutex_destroy(&remdev->l2ad_mtx); 6485 refcount_destroy(&remdev->l2ad_alloc); 6486 kmem_free(remdev, sizeof (l2arc_dev_t)); 6487 } 6488 6489 void 6490 l2arc_init(void) 6491 { 6492 l2arc_thread_exit = 0; 6493 l2arc_ndev = 0; 6494 l2arc_writes_sent = 0; 6495 l2arc_writes_done = 0; 6496 6497 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 6498 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 6499 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 6500 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 6501 6502 l2arc_dev_list = &L2ARC_dev_list; 6503 l2arc_free_on_write = &L2ARC_free_on_write; 6504 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 6505 offsetof(l2arc_dev_t, l2ad_node)); 6506 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 6507 offsetof(l2arc_data_free_t, l2df_list_node)); 6508 } 6509 6510 void 6511 l2arc_fini(void) 6512 { 6513 /* 6514 * This is called from dmu_fini(), which is called from spa_fini(); 6515 * Because of this, we can assume that all l2arc devices have 6516 * already been removed when the pools themselves were removed. 6517 */ 6518 6519 l2arc_do_free_on_write(); 6520 6521 mutex_destroy(&l2arc_feed_thr_lock); 6522 cv_destroy(&l2arc_feed_thr_cv); 6523 mutex_destroy(&l2arc_dev_mtx); 6524 mutex_destroy(&l2arc_free_on_write_mtx); 6525 6526 list_destroy(l2arc_dev_list); 6527 list_destroy(l2arc_free_on_write); 6528 } 6529 6530 void 6531 l2arc_start(void) 6532 { 6533 if (!(spa_mode_global & FWRITE)) 6534 return; 6535 6536 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 6537 TS_RUN, minclsyspri); 6538 } 6539 6540 void 6541 l2arc_stop(void) 6542 { 6543 if (!(spa_mode_global & FWRITE)) 6544 return; 6545 6546 mutex_enter(&l2arc_feed_thr_lock); 6547 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 6548 l2arc_thread_exit = 1; 6549 while (l2arc_thread_exit != 0) 6550 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 6551 mutex_exit(&l2arc_feed_thr_lock); 6552 }
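
/*
 * For orientation, the expected life cycle of this module as driven by
 * callers outside this file (a hedged sketch; exact call sites may vary):
 *
 *	l2arc_init();			set up locks and global lists
 *	l2arc_start();			spawn l2arc_feed_thread() (writable
 *					pools only)
 *	l2arc_add_vdev(spa, vd);	as "cache" vdevs are attached
 *	l2arc_remove_vdev(vd);		as "cache" vdevs are detached
 *	l2arc_stop();			wait for the feed thread to exit
 *	l2arc_fini();			tear down locks and lists
 */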