4185 New hash algorithm support
--- old/usr/src/uts/common/fs/zfs/arc.c
+++ new/usr/src/uts/common/fs/zfs/arc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 25 * Copyright (c) 2013 by Delphix. All rights reserved.
26 26 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * DVA-based Adjustable Replacement Cache
31 31 *
32 32 * While much of the theory of operation used here is
33 33 * based on the self-tuning, low overhead replacement cache
34 34 * presented by Megiddo and Modha at FAST 2003, there are some
35 35 * significant differences:
36 36 *
37 37 * 1. The Megiddo and Modha model assumes any page is evictable.
38 38 * Pages in its cache cannot be "locked" into memory. This makes
39 39 * the eviction algorithm simple: evict the last page in the list.
40 40 * This also makes the performance characteristics easy to reason
41 41 * about. Our cache is not so simple. At any given moment, some
42 42 * subset of the blocks in the cache are un-evictable because we
43 43 * have handed out a reference to them. Blocks are only evictable
44 44 * when there are no external references active. This makes
45 45 * eviction far more problematic: we choose to evict the evictable
46 46 * blocks that are the "lowest" in the list.
47 47 *
48 48 * There are times when it is not possible to evict the requested
49 49 * space. In these circumstances we are unable to adjust the cache
50 50 * size. To prevent the cache growing unbounded at these times we
51 51 * implement a "cache throttle" that slows the flow of new data
52 52 * into the cache until we can make space available.
53 53 *
54 54 * 2. The Megiddo and Modha model assumes a fixed cache size.
55 55 * Pages are evicted when the cache is full and there is a cache
56 56 * miss. Our model has a variable sized cache. It grows with
57 57 * high use, but also tries to react to memory pressure from the
58 58 * operating system: decreasing its size when system memory is
59 59 * tight.
60 60 *
61 61 * 3. The Megiddo and Modha model assumes a fixed page size. All
62 62 * elements of the cache are therefore exactly the same size. So
63 63 * when adjusting the cache size following a cache miss, it's simply
64 64 * a matter of choosing a single page to evict. In our model, we
65 65 * have variable sized cache blocks (ranging from 512 bytes to
66 66 * 128K bytes). We therefore choose a set of blocks to evict to make
67 67 * space for a cache miss that approximates as closely as possible
68 68 * the space used by the new block.
69 69 *
70 70 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
71 71 * by N. Megiddo & D. Modha, FAST 2003
72 72 */
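/*
 * For instance (illustrative numbers only): to make room for a 128K
 * demand miss, the ARC might evict one 64K and two 32K evictable
 * buffers from the tail of a list, rather than a single fixed-size
 * page as in the original Megiddo and Modha scheme.
 */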
73 73
74 74 /*
75 75 * The locking model:
76 76 *
77 77 * A new reference to a cache buffer can be obtained in two
78 78 * ways: 1) via a hash table lookup using the DVA as a key,
79 79 * or 2) via one of the ARC lists. The arc_read() interface
80 80 * uses method 1, while the internal arc algorithms for
81 81 * adjusting the cache use method 2. We therefore provide two
82 82 * types of locks: 1) the hash table lock array, and 2) the
83 83 * arc list locks.
84 84 *
85 85 * Buffers do not have their own mutexes, rather they rely on the
86 86 * hash table mutexes for the bulk of their protection (i.e. most
87 87 * fields in the arc_buf_hdr_t are protected by these mutexes).
88 88 *
89 89 * buf_hash_find() returns the appropriate mutex (held) when it
90 90 * locates the requested buffer in the hash table. It returns
91 91 * NULL for the mutex if the buffer was not in the table.
92 92 *
93 93 * buf_hash_remove() expects the appropriate hash mutex to be
94 94 * already held before it is invoked.
95 95 *
96 96 * Each arc state also has a mutex which is used to protect the
97 97 * buffer list associated with the state. When attempting to
98 98 * obtain a hash table lock while holding an arc list lock you
99 99 * must use mutex_tryenter() to avoid deadlock. Also note that
100 100 * the active state mutex must be held before the ghost state mutex.
101 101 *
102 102 * Arc buffers may have an associated eviction callback function.
103 103 * This function will be invoked prior to removing the buffer (e.g.
104 104 * in arc_do_user_evicts()). Note however that the data associated
105 105 * with the buffer may be evicted prior to the callback. The callback
106 106 * must be made with *no locks held* (to prevent deadlock). Additionally,
107 107 * the users of callbacks must ensure that their private data is
108 108 * protected from simultaneous callbacks from arc_buf_evict()
109 109 * and arc_do_user_evicts().
110 110 *
111 111 * Note that the majority of the performance stats are manipulated
112 112 * with atomic operations.
113 113 *
114 114 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
115 115 *
116 116 * - L2ARC buflist creation
117 117 * - L2ARC buflist eviction
118 118 * - L2ARC write completion, which walks L2ARC buflists
119 119 * - ARC header destruction, as it removes from L2ARC buflists
120 120 * - ARC header release, as it removes from L2ARC buflists
121 121 */
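/*
 * A minimal, illustrative sketch of the lock-ordering rule above (not
 * verbatim from the eviction code; the names follow the conventions
 * used elsewhere in this file). While an arc list lock is held, the
 * hash lock may only be taken with mutex_tryenter(); on failure the
 * buffer is skipped (and arcstat_mutex_miss is bumped) rather than
 * risking a deadlock:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	for (ab = list_tail(list); ab != NULL; ab = list_prev(list, ab)) {
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;		// never block here
 *		... evict or move ab ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */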
122 122
123 123 #include <sys/spa.h>
124 124 #include <sys/zio.h>
125 125 #include <sys/zio_compress.h>
126 126 #include <sys/zfs_context.h>
127 127 #include <sys/arc.h>
128 128 #include <sys/refcount.h>
129 129 #include <sys/vdev.h>
130 130 #include <sys/vdev_impl.h>
131 131 #include <sys/dsl_pool.h>
132 132 #ifdef _KERNEL
133 133 #include <sys/vmsystm.h>
134 134 #include <vm/anon.h>
135 135 #include <sys/fs/swapnode.h>
136 136 #include <sys/dnlc.h>
137 137 #endif
138 138 #include <sys/callb.h>
139 139 #include <sys/kstat.h>
140 140 #include <zfs_fletcher.h>
141 141
142 142 #ifndef _KERNEL
143 143 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
144 144 boolean_t arc_watch = B_FALSE;
145 145 int arc_procfd;
146 146 #endif
147 147
148 148 static kmutex_t arc_reclaim_thr_lock;
149 149 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
150 150 static uint8_t arc_thread_exit;
151 151
152 152 #define ARC_REDUCE_DNLC_PERCENT 3
153 153 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
154 154
155 155 typedef enum arc_reclaim_strategy {
156 156 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
157 157 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
158 158 } arc_reclaim_strategy_t;
159 159
160 160 /*
161 161 * The number of iterations through arc_evict_*() before we
162 162 * drop & reacquire the lock.
163 163 */
164 164 int arc_evict_iterations = 100;
165 165
166 166 /* number of seconds before growing cache again */
167 167 static int arc_grow_retry = 60;
168 168
169 169 /* shift of arc_c for calculating both min and max arc_p */
170 170 static int arc_p_min_shift = 4;
171 171
172 172 /* log2(fraction of arc to reclaim) */
173 173 static int arc_shrink_shift = 5;
174 174
175 175 /*
176 176 * minimum lifespan of a prefetch block in clock ticks
177 177 * (initialized in arc_init())
178 178 */
179 179 static int arc_min_prefetch_lifespan;
180 180
181 181 /*
182 182 * If this percent of memory is free, don't throttle.
183 183 */
184 184 int arc_lotsfree_percent = 10;
185 185
186 186 static int arc_dead;
187 187
188 188 /*
189 189 * The arc has filled available memory and has now warmed up.
190 190 */
191 191 static boolean_t arc_warm;
192 192
193 193 /*
194 194 * These tunables are for performance analysis.
195 195 */
196 196 uint64_t zfs_arc_max;
197 197 uint64_t zfs_arc_min;
198 198 uint64_t zfs_arc_meta_limit = 0;
199 199 int zfs_arc_grow_retry = 0;
200 200 int zfs_arc_shrink_shift = 0;
201 201 int zfs_arc_p_min_shift = 0;
202 202 int zfs_disable_dup_eviction = 0;
203 203
204 204 /*
205 205 * Note that buffers can be in one of 6 states:
206 206 * ARC_anon - anonymous (discussed below)
207 207 * ARC_mru - recently used, currently cached
208 208 * ARC_mru_ghost - recently used, no longer in cache
209 209 * ARC_mfu - frequently used, currently cached
210 210 * ARC_mfu_ghost - frequently used, no longer in cache
211 211 * ARC_l2c_only - exists in L2ARC but not other states
212 212 * When there are no active references to the buffer, it is
213 213 * linked onto a list in one of these arc states. These are
214 214 * the only buffers that can be evicted or deleted. Within each
215 215 * state there are multiple lists, one for meta-data and one for
216 216 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
217 217 * etc.) is tracked separately so that it can be managed more
218 218 * explicitly: favored over data, limited explicitly.
219 219 *
220 220 * Anonymous buffers are buffers that are not associated with
221 221 * a DVA. These are buffers that hold dirty block copies
222 222 * before they are written to stable storage. By definition,
223 223 * they are "ref'd" and are considered part of arc_mru
224 224 * that cannot be freed. Generally, they will acquire a DVA
225 225 * as they are written and migrate onto the arc_mru list.
226 226 *
227 227 * The ARC_l2c_only state is for buffers that are in the second
228 228 * level ARC but no longer in any of the ARC_m* lists. The second
229 229 * level ARC itself may also contain buffers that are in any of
230 230 * the ARC_m* states - meaning that a buffer can exist in two
231 231 * places. The reason for the ARC_l2c_only state is to keep the
232 232 * buffer header in the hash table, so that reads that hit the
233 233 * second level ARC benefit from these fast lookups.
234 234 */
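/*
 * A typical lifecycle, as driven by arc_access() (summary only): a
 * newly read buffer starts on arc_mru; a second hit promotes it to
 * arc_mfu; once its data is evicted, only the header remains on the
 * corresponding ghost list, and a later hit on that ghost entry is
 * used to adapt the MRU target size (arc_p) up or down.
 */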
235 235
236 236 typedef struct arc_state {
237 237 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */
238 238 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
239 239 uint64_t arcs_size; /* total amount of data in this state */
240 240 kmutex_t arcs_mtx;
241 241 } arc_state_t;
242 242
243 243 /* The 6 states: */
244 244 static arc_state_t ARC_anon;
245 245 static arc_state_t ARC_mru;
246 246 static arc_state_t ARC_mru_ghost;
247 247 static arc_state_t ARC_mfu;
248 248 static arc_state_t ARC_mfu_ghost;
249 249 static arc_state_t ARC_l2c_only;
250 250
251 251 typedef struct arc_stats {
252 252 kstat_named_t arcstat_hits;
253 253 kstat_named_t arcstat_misses;
254 254 kstat_named_t arcstat_demand_data_hits;
255 255 kstat_named_t arcstat_demand_data_misses;
256 256 kstat_named_t arcstat_demand_metadata_hits;
257 257 kstat_named_t arcstat_demand_metadata_misses;
258 258 kstat_named_t arcstat_prefetch_data_hits;
259 259 kstat_named_t arcstat_prefetch_data_misses;
260 260 kstat_named_t arcstat_prefetch_metadata_hits;
261 261 kstat_named_t arcstat_prefetch_metadata_misses;
262 262 kstat_named_t arcstat_mru_hits;
263 263 kstat_named_t arcstat_mru_ghost_hits;
264 264 kstat_named_t arcstat_mfu_hits;
265 265 kstat_named_t arcstat_mfu_ghost_hits;
266 266 kstat_named_t arcstat_deleted;
267 267 kstat_named_t arcstat_recycle_miss;
268 268 /*
269 269 * Number of buffers that could not be evicted because the hash lock
270 270 * was held by another thread. The lock may not necessarily be held
271 271 * by something using the same buffer, since hash locks are shared
272 272 * by multiple buffers.
273 273 */
274 274 kstat_named_t arcstat_mutex_miss;
275 275 /*
276 276 * Number of buffers skipped because they have I/O in progress, are
277 277 * indirect prefetch buffers that have not lived long enough, or are
278 278 * not from the spa we're trying to evict from.
279 279 */
280 280 kstat_named_t arcstat_evict_skip;
281 281 kstat_named_t arcstat_evict_l2_cached;
282 282 kstat_named_t arcstat_evict_l2_eligible;
283 283 kstat_named_t arcstat_evict_l2_ineligible;
284 284 kstat_named_t arcstat_hash_elements;
285 285 kstat_named_t arcstat_hash_elements_max;
286 286 kstat_named_t arcstat_hash_collisions;
287 287 kstat_named_t arcstat_hash_chains;
288 288 kstat_named_t arcstat_hash_chain_max;
289 289 kstat_named_t arcstat_p;
290 290 kstat_named_t arcstat_c;
291 291 kstat_named_t arcstat_c_min;
292 292 kstat_named_t arcstat_c_max;
293 293 kstat_named_t arcstat_size;
294 294 kstat_named_t arcstat_hdr_size;
295 295 kstat_named_t arcstat_data_size;
296 296 kstat_named_t arcstat_other_size;
297 297 kstat_named_t arcstat_l2_hits;
298 298 kstat_named_t arcstat_l2_misses;
299 299 kstat_named_t arcstat_l2_feeds;
300 300 kstat_named_t arcstat_l2_rw_clash;
301 301 kstat_named_t arcstat_l2_read_bytes;
302 302 kstat_named_t arcstat_l2_write_bytes;
303 303 kstat_named_t arcstat_l2_writes_sent;
304 304 kstat_named_t arcstat_l2_writes_done;
305 305 kstat_named_t arcstat_l2_writes_error;
306 306 kstat_named_t arcstat_l2_writes_hdr_miss;
307 307 kstat_named_t arcstat_l2_evict_lock_retry;
308 308 kstat_named_t arcstat_l2_evict_reading;
309 309 kstat_named_t arcstat_l2_free_on_write;
310 310 kstat_named_t arcstat_l2_abort_lowmem;
311 311 kstat_named_t arcstat_l2_cksum_bad;
312 312 kstat_named_t arcstat_l2_io_error;
313 313 kstat_named_t arcstat_l2_size;
314 314 kstat_named_t arcstat_l2_asize;
315 315 kstat_named_t arcstat_l2_hdr_size;
316 316 kstat_named_t arcstat_l2_compress_successes;
317 317 kstat_named_t arcstat_l2_compress_zeros;
318 318 kstat_named_t arcstat_l2_compress_failures;
319 319 kstat_named_t arcstat_memory_throttle_count;
320 320 kstat_named_t arcstat_duplicate_buffers;
321 321 kstat_named_t arcstat_duplicate_buffers_size;
322 322 kstat_named_t arcstat_duplicate_reads;
323 323 kstat_named_t arcstat_meta_used;
324 324 kstat_named_t arcstat_meta_limit;
325 325 kstat_named_t arcstat_meta_max;
326 326 } arc_stats_t;
327 327
328 328 static arc_stats_t arc_stats = {
329 329 { "hits", KSTAT_DATA_UINT64 },
330 330 { "misses", KSTAT_DATA_UINT64 },
331 331 { "demand_data_hits", KSTAT_DATA_UINT64 },
332 332 { "demand_data_misses", KSTAT_DATA_UINT64 },
333 333 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
334 334 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
335 335 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
336 336 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
337 337 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
338 338 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
339 339 { "mru_hits", KSTAT_DATA_UINT64 },
340 340 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
341 341 { "mfu_hits", KSTAT_DATA_UINT64 },
342 342 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
343 343 { "deleted", KSTAT_DATA_UINT64 },
344 344 { "recycle_miss", KSTAT_DATA_UINT64 },
345 345 { "mutex_miss", KSTAT_DATA_UINT64 },
346 346 { "evict_skip", KSTAT_DATA_UINT64 },
347 347 { "evict_l2_cached", KSTAT_DATA_UINT64 },
348 348 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
349 349 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
350 350 { "hash_elements", KSTAT_DATA_UINT64 },
351 351 { "hash_elements_max", KSTAT_DATA_UINT64 },
352 352 { "hash_collisions", KSTAT_DATA_UINT64 },
353 353 { "hash_chains", KSTAT_DATA_UINT64 },
354 354 { "hash_chain_max", KSTAT_DATA_UINT64 },
355 355 { "p", KSTAT_DATA_UINT64 },
356 356 { "c", KSTAT_DATA_UINT64 },
357 357 { "c_min", KSTAT_DATA_UINT64 },
358 358 { "c_max", KSTAT_DATA_UINT64 },
359 359 { "size", KSTAT_DATA_UINT64 },
360 360 { "hdr_size", KSTAT_DATA_UINT64 },
361 361 { "data_size", KSTAT_DATA_UINT64 },
362 362 { "other_size", KSTAT_DATA_UINT64 },
363 363 { "l2_hits", KSTAT_DATA_UINT64 },
364 364 { "l2_misses", KSTAT_DATA_UINT64 },
365 365 { "l2_feeds", KSTAT_DATA_UINT64 },
366 366 { "l2_rw_clash", KSTAT_DATA_UINT64 },
367 367 { "l2_read_bytes", KSTAT_DATA_UINT64 },
368 368 { "l2_write_bytes", KSTAT_DATA_UINT64 },
369 369 { "l2_writes_sent", KSTAT_DATA_UINT64 },
370 370 { "l2_writes_done", KSTAT_DATA_UINT64 },
371 371 { "l2_writes_error", KSTAT_DATA_UINT64 },
372 372 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
373 373 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
374 374 { "l2_evict_reading", KSTAT_DATA_UINT64 },
375 375 { "l2_free_on_write", KSTAT_DATA_UINT64 },
376 376 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
377 377 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
378 378 { "l2_io_error", KSTAT_DATA_UINT64 },
379 379 { "l2_size", KSTAT_DATA_UINT64 },
380 380 { "l2_asize", KSTAT_DATA_UINT64 },
381 381 { "l2_hdr_size", KSTAT_DATA_UINT64 },
382 382 { "l2_compress_successes", KSTAT_DATA_UINT64 },
383 383 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
384 384 { "l2_compress_failures", KSTAT_DATA_UINT64 },
385 385 { "memory_throttle_count", KSTAT_DATA_UINT64 },
386 386 { "duplicate_buffers", KSTAT_DATA_UINT64 },
387 387 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
388 388 { "duplicate_reads", KSTAT_DATA_UINT64 },
389 389 { "arc_meta_used", KSTAT_DATA_UINT64 },
390 390 { "arc_meta_limit", KSTAT_DATA_UINT64 },
391 391 { "arc_meta_max", KSTAT_DATA_UINT64 }
392 392 };
393 393
394 394 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
395 395
396 396 #define ARCSTAT_INCR(stat, val) \
397 397 atomic_add_64(&arc_stats.stat.value.ui64, (val))
398 398
399 399 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
400 400 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
401 401
402 402 #define ARCSTAT_MAX(stat, val) { \
403 403 uint64_t m; \
404 404 while ((val) > (m = arc_stats.stat.value.ui64) && \
405 405 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
406 406 continue; \
407 407 }
408 408
409 409 #define ARCSTAT_MAXSTAT(stat) \
410 410 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
411 411
412 412 /*
413 413 * We define a macro to allow ARC hits/misses to be easily broken down by
414 414 * two separate conditions, giving a total of four different subtypes for
415 415 * each of hits and misses (so eight statistics total).
416 416 */
417 417 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
418 418 if (cond1) { \
419 419 if (cond2) { \
420 420 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
421 421 } else { \
422 422 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
423 423 } \
424 424 } else { \
425 425 if (cond2) { \
426 426 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
427 427 } else { \
428 428 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
429 429 } \
430 430 }
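/*
 * For example, the hit path in arc_buf_add_ref() below classifies each
 * hit as demand vs. prefetch and data vs. metadata in a single call:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of arcstat_{demand,prefetch}_{data,metadata}_hits.
 */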
431 431
432 432 kstat_t *arc_ksp;
433 433 static arc_state_t *arc_anon;
434 434 static arc_state_t *arc_mru;
435 435 static arc_state_t *arc_mru_ghost;
436 436 static arc_state_t *arc_mfu;
437 437 static arc_state_t *arc_mfu_ghost;
438 438 static arc_state_t *arc_l2c_only;
439 439
440 440 /*
441 441 * There are several ARC variables that are critical to export as kstats --
442 442 * but we don't want to have to grovel around in the kstat whenever we wish to
443 443 * manipulate them. For these variables, we therefore define them to be in
444 444 * terms of the statistic variable. This assures that we are not introducing
445 445 * the possibility of inconsistency by having shadow copies of the variables,
446 446 * while still allowing the code to be readable.
447 447 */
448 448 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
449 449 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
450 450 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
451 451 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
452 452 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
453 453 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
454 454 #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */
455 455 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
456 456
457 457 #define L2ARC_IS_VALID_COMPRESS(_c_) \
458 458 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
459 459
460 460 static int arc_no_grow; /* Don't try to grow cache size */
461 461 static uint64_t arc_tempreserve;
462 462 static uint64_t arc_loaned_bytes;
463 463
464 464 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
465 465
466 466 typedef struct arc_callback arc_callback_t;
467 467
468 468 struct arc_callback {
469 469 void *acb_private;
470 470 arc_done_func_t *acb_done;
471 471 arc_buf_t *acb_buf;
472 472 zio_t *acb_zio_dummy;
473 473 arc_callback_t *acb_next;
474 474 };
475 475
476 476 typedef struct arc_write_callback arc_write_callback_t;
477 477
478 478 struct arc_write_callback {
479 479 void *awcb_private;
480 480 arc_done_func_t *awcb_ready;
481 481 arc_done_func_t *awcb_physdone;
482 482 arc_done_func_t *awcb_done;
483 483 arc_buf_t *awcb_buf;
484 484 };
485 485
486 486 struct arc_buf_hdr {
487 487 /* protected by hash lock */
488 488 dva_t b_dva;
489 489 uint64_t b_birth;
490 490 uint64_t b_cksum0;
491 491
492 492 kmutex_t b_freeze_lock;
493 493 zio_cksum_t *b_freeze_cksum;
494 494 void *b_thawed;
495 495
496 496 arc_buf_hdr_t *b_hash_next;
497 497 arc_buf_t *b_buf;
498 498 uint32_t b_flags;
499 499 uint32_t b_datacnt;
500 500
501 501 arc_callback_t *b_acb;
502 502 kcondvar_t b_cv;
503 503
504 504 /* immutable */
505 505 arc_buf_contents_t b_type;
506 506 uint64_t b_size;
507 507 uint64_t b_spa;
508 508
509 509 /* protected by arc state mutex */
510 510 arc_state_t *b_state;
511 511 list_node_t b_arc_node;
512 512
513 513 /* updated atomically */
514 514 clock_t b_arc_access;
515 515
516 516 /* self protecting */
517 517 refcount_t b_refcnt;
518 518
519 519 l2arc_buf_hdr_t *b_l2hdr;
520 520 list_node_t b_l2node;
521 521 };
522 522
523 523 static arc_buf_t *arc_eviction_list;
524 524 static kmutex_t arc_eviction_mtx;
525 525 static arc_buf_hdr_t arc_eviction_hdr;
526 526 static void arc_get_data_buf(arc_buf_t *buf);
527 527 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
528 528 static int arc_evict_needed(arc_buf_contents_t type);
529 529 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
530 530 static void arc_buf_watch(arc_buf_t *buf);
531 531
532 532 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
533 533
534 534 #define GHOST_STATE(state) \
535 535 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
536 536 (state) == arc_l2c_only)
537 537
538 538 /*
539 539 * Private ARC flags. These flags are private ARC only flags that will show up
540 540 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
541 541 * be passed in as arc_flags in things like arc_read. However, these flags
542 542 * should never be passed and should only be set by ARC code. When adding new
543 543 * public flags, make sure not to smash the private ones.
544 544 */
545 545
546 546 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
547 547 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
548 548 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
549 549 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
550 550 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
551 551 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */
552 552 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
553 553 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
554 554 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
555 555 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
556 556
557 557 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
558 558 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
559 559 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
560 560 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
561 561 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
562 562 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
563 563 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
564 564 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
565 565 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
566 566 (hdr)->b_l2hdr != NULL)
567 567 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
568 568 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
569 569 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
570 570
571 571 /*
572 572 * Other sizes
573 573 */
574 574
575 575 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
576 576 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
577 577
578 578 /*
579 579 * Hash table routines
580 580 */
581 581
582 582 #define HT_LOCK_PAD 64
583 583
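/*
 * Each hash lock lives in its own HT_LOCK_PAD-byte slot so that, on
 * typical hardware with 64-byte cache lines, adjacent locks are
 * unlikely to share a cache line and bounce between CPUs under
 * contention.
 */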
584 584 struct ht_lock {
585 585 kmutex_t ht_lock;
586 586 #ifdef _KERNEL
587 587 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
588 588 #endif
589 589 };
590 590
591 591 #define BUF_LOCKS 256
592 592 typedef struct buf_hash_table {
593 593 uint64_t ht_mask;
594 594 arc_buf_hdr_t **ht_table;
595 595 struct ht_lock ht_locks[BUF_LOCKS];
596 596 } buf_hash_table_t;
597 597
598 598 static buf_hash_table_t buf_hash_table;
599 599
600 600 #define BUF_HASH_INDEX(spa, dva, birth) \
601 601 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
602 602 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
603 603 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
604 604 #define HDR_LOCK(hdr) \
605 605 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
606 606
607 607 uint64_t zfs_crc64_table[256];
608 608
609 609 /*
610 610 * Level 2 ARC
611 611 */
612 612
613 613 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
614 614 #define L2ARC_HEADROOM 2 /* num of writes */
615 615 /*
616 616 * If we discover during ARC scan any buffers to be compressed, we boost
617 617 * our headroom for the next scanning cycle by this percentage multiple.
618 618 */
619 619 #define L2ARC_HEADROOM_BOOST 200
620 620 #define L2ARC_FEED_SECS 1 /* caching interval secs */
621 621 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
622 622
623 623 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
624 624 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
625 625
626 626 /* L2ARC Performance Tunables */
627 627 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
628 628 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
629 629 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
630 630 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
631 631 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
632 632 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
633 633 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
634 634 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
635 635 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
636 636
637 637 /*
638 638 * L2ARC Internals
639 639 */
640 640 typedef struct l2arc_dev {
641 641 vdev_t *l2ad_vdev; /* vdev */
642 642 spa_t *l2ad_spa; /* spa */
643 643 uint64_t l2ad_hand; /* next write location */
644 644 uint64_t l2ad_start; /* first addr on device */
645 645 uint64_t l2ad_end; /* last addr on device */
646 646 uint64_t l2ad_evict; /* last addr eviction reached */
647 647 boolean_t l2ad_first; /* first sweep through */
648 648 boolean_t l2ad_writing; /* currently writing */
649 649 list_t *l2ad_buflist; /* buffer list */
650 650 list_node_t l2ad_node; /* device list node */
651 651 } l2arc_dev_t;
652 652
653 653 static list_t L2ARC_dev_list; /* device list */
654 654 static list_t *l2arc_dev_list; /* device list pointer */
655 655 static kmutex_t l2arc_dev_mtx; /* device list mutex */
656 656 static l2arc_dev_t *l2arc_dev_last; /* last device used */
657 657 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
658 658 static list_t L2ARC_free_on_write; /* free after write buf list */
659 659 static list_t *l2arc_free_on_write; /* free after write list ptr */
660 660 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
661 661 static uint64_t l2arc_ndev; /* number of devices */
662 662
663 663 typedef struct l2arc_read_callback {
664 664 arc_buf_t *l2rcb_buf; /* read buffer */
665 665 spa_t *l2rcb_spa; /* spa */
666 666 blkptr_t l2rcb_bp; /* original blkptr */
667 667 zbookmark_t l2rcb_zb; /* original bookmark */
668 668 int l2rcb_flags; /* original flags */
669 669 enum zio_compress l2rcb_compress; /* applied compress */
670 670 } l2arc_read_callback_t;
671 671
672 672 typedef struct l2arc_write_callback {
673 673 l2arc_dev_t *l2wcb_dev; /* device info */
674 674 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
675 675 } l2arc_write_callback_t;
676 676
677 677 struct l2arc_buf_hdr {
678 678 /* protected by arc_buf_hdr mutex */
679 679 l2arc_dev_t *b_dev; /* L2ARC device */
680 680 uint64_t b_daddr; /* disk address, offset byte */
681 681 /* compression applied to buffer data */
682 682 enum zio_compress b_compress;
683 683 /* real alloc'd buffer size depending on b_compress applied */
684 684 int b_asize;
685 685 /* temporary buffer holder for in-flight compressed data */
686 686 void *b_tmp_cdata;
687 687 };
688 688
689 689 typedef struct l2arc_data_free {
690 690 /* protected by l2arc_free_on_write_mtx */
691 691 void *l2df_data;
692 692 size_t l2df_size;
693 693 void (*l2df_func)(void *, size_t);
694 694 list_node_t l2df_list_node;
695 695 } l2arc_data_free_t;
696 696
697 697 static kmutex_t l2arc_feed_thr_lock;
698 698 static kcondvar_t l2arc_feed_thr_cv;
699 699 static uint8_t l2arc_thread_exit;
700 700
701 701 static void l2arc_read_done(zio_t *zio);
702 702 static void l2arc_hdr_stat_add(void);
703 703 static void l2arc_hdr_stat_remove(void);
704 704
705 705 static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
706 706 static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
707 707 enum zio_compress c);
708 708 static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
709 709
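/*
 * Hash a buffer's identity -- spa load guid, DVA and birth txg -- for
 * the buf hash table. The DVA bytes are run through the ZFS CRC-64
 * table, and the spa and birth values are folded in at the end;
 * BUF_HASH_INDEX() then masks the result down to a table slot.
 */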
710 710 static uint64_t
711 711 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
712 712 {
713 713 uint8_t *vdva = (uint8_t *)dva;
714 714 uint64_t crc = -1ULL;
715 715 int i;
716 716
717 717 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
718 718
719 719 for (i = 0; i < sizeof (dva_t); i++)
720 720 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
721 721
722 722 crc ^= (spa>>8) ^ birth;
723 723
724 724 return (crc);
725 725 }
726 726
727 727 #define BUF_EMPTY(buf) \
728 728 ((buf)->b_dva.dva_word[0] == 0 && \
729 729 (buf)->b_dva.dva_word[1] == 0 && \
730 730 (buf)->b_birth == 0)
731 731
732 732 #define BUF_EQUAL(spa, dva, birth, buf) \
733 733 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
734 734 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
735 735 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
736 736
737 737 static void
738 738 buf_discard_identity(arc_buf_hdr_t *hdr)
739 739 {
740 740 hdr->b_dva.dva_word[0] = 0;
741 741 hdr->b_dva.dva_word[1] = 0;
742 742 hdr->b_birth = 0;
743 743 hdr->b_cksum0 = 0;
744 744 }
745 745
746 746 static arc_buf_hdr_t *
747 747 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
748 748 {
749 749 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
750 750 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
751 751 arc_buf_hdr_t *buf;
752 752
753 753 mutex_enter(hash_lock);
754 754 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
755 755 buf = buf->b_hash_next) {
756 756 if (BUF_EQUAL(spa, dva, birth, buf)) {
757 757 *lockp = hash_lock;
758 758 return (buf);
759 759 }
760 760 }
761 761 mutex_exit(hash_lock);
762 762 *lockp = NULL;
763 763 return (NULL);
764 764 }
765 765
766 766 /*
767 767 * Insert an entry into the hash table. If there is already an element
768 768 * equal to elem in the hash table, then the already existing element
769 769 * will be returned and the new element will not be inserted.
770 770 * Otherwise returns NULL.
771 771 */
772 772 static arc_buf_hdr_t *
773 773 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
774 774 {
775 775 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
776 776 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
777 777 arc_buf_hdr_t *fbuf;
778 778 uint32_t i;
779 779
780 780 ASSERT(!HDR_IN_HASH_TABLE(buf));
781 781 *lockp = hash_lock;
782 782 mutex_enter(hash_lock);
783 783 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
784 784 fbuf = fbuf->b_hash_next, i++) {
785 785 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
786 786 return (fbuf);
787 787 }
788 788
789 789 buf->b_hash_next = buf_hash_table.ht_table[idx];
790 790 buf_hash_table.ht_table[idx] = buf;
791 791 buf->b_flags |= ARC_IN_HASH_TABLE;
792 792
793 793 /* collect some hash table performance data */
794 794 if (i > 0) {
795 795 ARCSTAT_BUMP(arcstat_hash_collisions);
796 796 if (i == 1)
797 797 ARCSTAT_BUMP(arcstat_hash_chains);
798 798
799 799 ARCSTAT_MAX(arcstat_hash_chain_max, i);
800 800 }
801 801
802 802 ARCSTAT_BUMP(arcstat_hash_elements);
803 803 ARCSTAT_MAXSTAT(arcstat_hash_elements);
804 804
805 805 return (NULL);
806 806 }
807 807
808 808 static void
809 809 buf_hash_remove(arc_buf_hdr_t *buf)
810 810 {
811 811 arc_buf_hdr_t *fbuf, **bufp;
812 812 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
813 813
814 814 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
815 815 ASSERT(HDR_IN_HASH_TABLE(buf));
816 816
817 817 bufp = &buf_hash_table.ht_table[idx];
818 818 while ((fbuf = *bufp) != buf) {
819 819 ASSERT(fbuf != NULL);
820 820 bufp = &fbuf->b_hash_next;
821 821 }
822 822 *bufp = buf->b_hash_next;
823 823 buf->b_hash_next = NULL;
824 824 buf->b_flags &= ~ARC_IN_HASH_TABLE;
825 825
826 826 /* collect some hash table performance data */
827 827 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
828 828
829 829 if (buf_hash_table.ht_table[idx] &&
830 830 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
831 831 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
832 832 }
833 833
834 834 /*
835 835 * Global data structures and functions for the buf kmem cache.
836 836 */
837 837 static kmem_cache_t *hdr_cache;
838 838 static kmem_cache_t *buf_cache;
839 839
840 840 static void
841 841 buf_fini(void)
842 842 {
843 843 int i;
844 844
845 845 kmem_free(buf_hash_table.ht_table,
846 846 (buf_hash_table.ht_mask + 1) * sizeof (void *));
847 847 for (i = 0; i < BUF_LOCKS; i++)
848 848 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
849 849 kmem_cache_destroy(hdr_cache);
850 850 kmem_cache_destroy(buf_cache);
851 851 }
852 852
853 853 /*
854 854 * Constructor callback - called when the cache is empty
855 855 * and a new buf is requested.
856 856 */
857 857 /* ARGSUSED */
858 858 static int
859 859 hdr_cons(void *vbuf, void *unused, int kmflag)
860 860 {
861 861 arc_buf_hdr_t *buf = vbuf;
862 862
863 863 bzero(buf, sizeof (arc_buf_hdr_t));
864 864 refcount_create(&buf->b_refcnt);
865 865 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
866 866 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
867 867 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
868 868
869 869 return (0);
870 870 }
871 871
872 872 /* ARGSUSED */
873 873 static int
874 874 buf_cons(void *vbuf, void *unused, int kmflag)
875 875 {
876 876 arc_buf_t *buf = vbuf;
877 877
878 878 bzero(buf, sizeof (arc_buf_t));
879 879 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
880 880 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
881 881
882 882 return (0);
883 883 }
884 884
885 885 /*
886 886 * Destructor callback - called when a cached buf is
887 887 * no longer required.
888 888 */
889 889 /* ARGSUSED */
890 890 static void
891 891 hdr_dest(void *vbuf, void *unused)
892 892 {
893 893 arc_buf_hdr_t *buf = vbuf;
894 894
895 895 ASSERT(BUF_EMPTY(buf));
896 896 refcount_destroy(&buf->b_refcnt);
897 897 cv_destroy(&buf->b_cv);
898 898 mutex_destroy(&buf->b_freeze_lock);
899 899 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
900 900 }
901 901
902 902 /* ARGSUSED */
903 903 static void
904 904 buf_dest(void *vbuf, void *unused)
905 905 {
906 906 arc_buf_t *buf = vbuf;
907 907
908 908 mutex_destroy(&buf->b_evict_lock);
909 909 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
910 910 }
911 911
912 912 /*
913 913 * Reclaim callback -- invoked when memory is low.
914 914 */
915 915 /* ARGSUSED */
916 916 static void
917 917 hdr_recl(void *unused)
918 918 {
919 919 dprintf("hdr_recl called\n");
920 920 /*
921 921 * umem calls the reclaim func when we destroy the buf cache,
922 922 * which is after we do arc_fini().
923 923 */
924 924 if (!arc_dead)
925 925 cv_signal(&arc_reclaim_thr_cv);
926 926 }
927 927
928 928 static void
929 929 buf_init(void)
930 930 {
931 931 uint64_t *ct;
932 932 uint64_t hsize = 1ULL << 12;
933 933 int i, j;
934 934
935 935 /*
936 936 * The hash table is big enough to fill all of physical memory
937 937 * with an average 64K block size. The table will take up
938 938 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
939 939 */
940 940 while (hsize * 65536 < physmem * PAGESIZE)
941 941 hsize <<= 1;
942 942 retry:
943 943 buf_hash_table.ht_mask = hsize - 1;
944 944 buf_hash_table.ht_table =
945 945 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
946 946 if (buf_hash_table.ht_table == NULL) {
947 947 ASSERT(hsize > (1ULL << 8));
948 948 hsize >>= 1;
949 949 goto retry;
950 950 }
951 951
952 952 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
953 953 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
954 954 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
955 955 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
956 956
957 957 for (i = 0; i < 256; i++)
958 958 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
959 959 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
960 960
961 961 for (i = 0; i < BUF_LOCKS; i++) {
962 962 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
963 963 NULL, MUTEX_DEFAULT, NULL);
964 964 }
965 965 }
966 966
967 967 #define ARC_MINTIME (hz>>4) /* 62 ms */
968 968
969 969 static void
970 970 arc_cksum_verify(arc_buf_t *buf)
971 971 {
972 972 zio_cksum_t zc;
973 973
974 974 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
975 975 return;
976 976
977 977 mutex_enter(&buf->b_hdr->b_freeze_lock);
978 978 if (buf->b_hdr->b_freeze_cksum == NULL ||
979 979 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
980 980 mutex_exit(&buf->b_hdr->b_freeze_lock);
981 981 return;
982 982 }
983 - fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
983 + fletcher_2_native(buf->b_data, buf->b_hdr->b_size, NULL, NULL, &zc);
984 984 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
985 985 panic("buffer modified while frozen!");
986 986 mutex_exit(&buf->b_hdr->b_freeze_lock);
987 987 }
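/*
 * Note on the hunk above (and the matching hunks in arc_cksum_equal()
 * and arc_cksum_compute() below): with the new hash algorithm support,
 * fletcher_2_native() takes two additional arguments. The ARC's debug
 * checksums do not use them, so NULL is passed for both; the data and
 * size arguments are unchanged.
 */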
988 988
989 989 static int
990 990 arc_cksum_equal(arc_buf_t *buf)
991 991 {
992 992 zio_cksum_t zc;
993 993 int equal;
994 994
995 995 mutex_enter(&buf->b_hdr->b_freeze_lock);
996 - fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
996 + fletcher_2_native(buf->b_data, buf->b_hdr->b_size, NULL, NULL, &zc);
997 997 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
998 998 mutex_exit(&buf->b_hdr->b_freeze_lock);
999 999
1000 1000 return (equal);
1001 1001 }
1002 1002
1003 1003 static void
1004 1004 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1005 1005 {
1006 1006 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1007 1007 return;
1008 1008
1009 1009 mutex_enter(&buf->b_hdr->b_freeze_lock);
1010 1010 if (buf->b_hdr->b_freeze_cksum != NULL) {
1011 1011 mutex_exit(&buf->b_hdr->b_freeze_lock);
1012 1012 return;
1013 1013 }
1014 1014 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1015 - fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1015 + fletcher_2_native(buf->b_data, buf->b_hdr->b_size, NULL, NULL,
1016 1016 buf->b_hdr->b_freeze_cksum);
1017 1017 mutex_exit(&buf->b_hdr->b_freeze_lock);
1018 1018 arc_buf_watch(buf);
1019 1019 }
1020 1020
1021 1021 #ifndef _KERNEL
1022 1022 typedef struct procctl {
1023 1023 long cmd;
1024 1024 prwatch_t prwatch;
1025 1025 } procctl_t;
1026 1026 #endif
1027 1027
1028 1028 /* ARGSUSED */
1029 1029 static void
1030 1030 arc_buf_unwatch(arc_buf_t *buf)
1031 1031 {
1032 1032 #ifndef _KERNEL
1033 1033 if (arc_watch) {
1034 1034 int result;
1035 1035 procctl_t ctl;
1036 1036 ctl.cmd = PCWATCH;
1037 1037 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1038 1038 ctl.prwatch.pr_size = 0;
1039 1039 ctl.prwatch.pr_wflags = 0;
1040 1040 result = write(arc_procfd, &ctl, sizeof (ctl));
1041 1041 ASSERT3U(result, ==, sizeof (ctl));
1042 1042 }
1043 1043 #endif
1044 1044 }
1045 1045
1046 1046 /* ARGSUSED */
1047 1047 static void
1048 1048 arc_buf_watch(arc_buf_t *buf)
1049 1049 {
1050 1050 #ifndef _KERNEL
1051 1051 if (arc_watch) {
1052 1052 int result;
1053 1053 procctl_t ctl;
1054 1054 ctl.cmd = PCWATCH;
1055 1055 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1056 1056 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1057 1057 ctl.prwatch.pr_wflags = WA_WRITE;
1058 1058 result = write(arc_procfd, &ctl, sizeof (ctl));
1059 1059 ASSERT3U(result, ==, sizeof (ctl));
1060 1060 }
1061 1061 #endif
1062 1062 }
1063 1063
1064 1064 void
1065 1065 arc_buf_thaw(arc_buf_t *buf)
1066 1066 {
1067 1067 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1068 1068 if (buf->b_hdr->b_state != arc_anon)
1069 1069 panic("modifying non-anon buffer!");
1070 1070 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1071 1071 panic("modifying buffer while i/o in progress!");
1072 1072 arc_cksum_verify(buf);
1073 1073 }
1074 1074
1075 1075 mutex_enter(&buf->b_hdr->b_freeze_lock);
1076 1076 if (buf->b_hdr->b_freeze_cksum != NULL) {
1077 1077 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1078 1078 buf->b_hdr->b_freeze_cksum = NULL;
1079 1079 }
1080 1080
1081 1081 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1082 1082 if (buf->b_hdr->b_thawed)
1083 1083 kmem_free(buf->b_hdr->b_thawed, 1);
1084 1084 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1085 1085 }
1086 1086
1087 1087 mutex_exit(&buf->b_hdr->b_freeze_lock);
1088 1088
1089 1089 arc_buf_unwatch(buf);
1090 1090 }
1091 1091
1092 1092 void
1093 1093 arc_buf_freeze(arc_buf_t *buf)
1094 1094 {
1095 1095 kmutex_t *hash_lock;
1096 1096
1097 1097 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1098 1098 return;
1099 1099
1100 1100 hash_lock = HDR_LOCK(buf->b_hdr);
1101 1101 mutex_enter(hash_lock);
1102 1102
1103 1103 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1104 1104 buf->b_hdr->b_state == arc_anon);
1105 1105 arc_cksum_compute(buf, B_FALSE);
1106 1106 mutex_exit(hash_lock);
1107 1107
1108 1108 }
1109 1109
1110 1110 static void
1111 1111 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1112 1112 {
1113 1113 ASSERT(MUTEX_HELD(hash_lock));
1114 1114
1115 1115 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1116 1116 (ab->b_state != arc_anon)) {
1117 1117 uint64_t delta = ab->b_size * ab->b_datacnt;
1118 1118 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1119 1119 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1120 1120
1121 1121 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1122 1122 mutex_enter(&ab->b_state->arcs_mtx);
1123 1123 ASSERT(list_link_active(&ab->b_arc_node));
1124 1124 list_remove(list, ab);
1125 1125 if (GHOST_STATE(ab->b_state)) {
1126 1126 ASSERT0(ab->b_datacnt);
1127 1127 ASSERT3P(ab->b_buf, ==, NULL);
1128 1128 delta = ab->b_size;
1129 1129 }
1130 1130 ASSERT(delta > 0);
1131 1131 ASSERT3U(*size, >=, delta);
1132 1132 atomic_add_64(size, -delta);
1133 1133 mutex_exit(&ab->b_state->arcs_mtx);
1134 1134 /* remove the prefetch flag if we get a reference */
1135 1135 if (ab->b_flags & ARC_PREFETCH)
1136 1136 ab->b_flags &= ~ARC_PREFETCH;
1137 1137 }
1138 1138 }
1139 1139
1140 1140 static int
1141 1141 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1142 1142 {
1143 1143 int cnt;
1144 1144 arc_state_t *state = ab->b_state;
1145 1145
1146 1146 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1147 1147 ASSERT(!GHOST_STATE(state));
1148 1148
1149 1149 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1150 1150 (state != arc_anon)) {
1151 1151 uint64_t *size = &state->arcs_lsize[ab->b_type];
1152 1152
1153 1153 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1154 1154 mutex_enter(&state->arcs_mtx);
1155 1155 ASSERT(!list_link_active(&ab->b_arc_node));
1156 1156 list_insert_head(&state->arcs_list[ab->b_type], ab);
1157 1157 ASSERT(ab->b_datacnt > 0);
1158 1158 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1159 1159 mutex_exit(&state->arcs_mtx);
1160 1160 }
1161 1161 return (cnt);
1162 1162 }
1163 1163
1164 1164 /*
1165 1165 * Move the supplied buffer to the indicated state. The mutex
1166 1166 * for the buffer must be held by the caller.
1167 1167 */
1168 1168 static void
1169 1169 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1170 1170 {
1171 1171 arc_state_t *old_state = ab->b_state;
1172 1172 int64_t refcnt = refcount_count(&ab->b_refcnt);
1173 1173 uint64_t from_delta, to_delta;
1174 1174
1175 1175 ASSERT(MUTEX_HELD(hash_lock));
1176 1176 ASSERT3P(new_state, !=, old_state);
1177 1177 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1178 1178 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1179 1179 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1180 1180
1181 1181 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1182 1182
1183 1183 /*
1184 1184 * If this buffer is evictable, transfer it from the
1185 1185 * old state list to the new state list.
1186 1186 */
1187 1187 if (refcnt == 0) {
1188 1188 if (old_state != arc_anon) {
1189 1189 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1190 1190 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1191 1191
1192 1192 if (use_mutex)
1193 1193 mutex_enter(&old_state->arcs_mtx);
1194 1194
1195 1195 ASSERT(list_link_active(&ab->b_arc_node));
1196 1196 list_remove(&old_state->arcs_list[ab->b_type], ab);
1197 1197
1198 1198 /*
1199 1199 * If prefetching out of the ghost cache,
1200 1200 * we will have a non-zero datacnt.
1201 1201 */
1202 1202 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1203 1203 /* ghost elements have a ghost size */
1204 1204 ASSERT(ab->b_buf == NULL);
1205 1205 from_delta = ab->b_size;
1206 1206 }
1207 1207 ASSERT3U(*size, >=, from_delta);
1208 1208 atomic_add_64(size, -from_delta);
1209 1209
1210 1210 if (use_mutex)
1211 1211 mutex_exit(&old_state->arcs_mtx);
1212 1212 }
1213 1213 if (new_state != arc_anon) {
1214 1214 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1215 1215 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1216 1216
1217 1217 if (use_mutex)
1218 1218 mutex_enter(&new_state->arcs_mtx);
1219 1219
1220 1220 list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1221 1221
1222 1222 /* ghost elements have a ghost size */
1223 1223 if (GHOST_STATE(new_state)) {
1224 1224 ASSERT(ab->b_datacnt == 0);
1225 1225 ASSERT(ab->b_buf == NULL);
1226 1226 to_delta = ab->b_size;
1227 1227 }
1228 1228 atomic_add_64(size, to_delta);
1229 1229
1230 1230 if (use_mutex)
1231 1231 mutex_exit(&new_state->arcs_mtx);
1232 1232 }
1233 1233 }
1234 1234
1235 1235 ASSERT(!BUF_EMPTY(ab));
1236 1236 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1237 1237 buf_hash_remove(ab);
1238 1238
1239 1239 /* adjust state sizes */
1240 1240 if (to_delta)
1241 1241 atomic_add_64(&new_state->arcs_size, to_delta);
1242 1242 if (from_delta) {
1243 1243 ASSERT3U(old_state->arcs_size, >=, from_delta);
1244 1244 atomic_add_64(&old_state->arcs_size, -from_delta);
1245 1245 }
1246 1246 ab->b_state = new_state;
1247 1247
1248 1248 /* adjust l2arc hdr stats */
1249 1249 if (new_state == arc_l2c_only)
1250 1250 l2arc_hdr_stat_add();
1251 1251 else if (old_state == arc_l2c_only)
1252 1252 l2arc_hdr_stat_remove();
1253 1253 }
1254 1254
1255 1255 void
1256 1256 arc_space_consume(uint64_t space, arc_space_type_t type)
1257 1257 {
1258 1258 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1259 1259
1260 1260 switch (type) {
1261 1261 case ARC_SPACE_DATA:
1262 1262 ARCSTAT_INCR(arcstat_data_size, space);
1263 1263 break;
1264 1264 case ARC_SPACE_OTHER:
1265 1265 ARCSTAT_INCR(arcstat_other_size, space);
1266 1266 break;
1267 1267 case ARC_SPACE_HDRS:
1268 1268 ARCSTAT_INCR(arcstat_hdr_size, space);
1269 1269 break;
1270 1270 case ARC_SPACE_L2HDRS:
1271 1271 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1272 1272 break;
1273 1273 }
1274 1274
1275 1275 ARCSTAT_INCR(arcstat_meta_used, space);
1276 1276 atomic_add_64(&arc_size, space);
1277 1277 }
1278 1278
1279 1279 void
1280 1280 arc_space_return(uint64_t space, arc_space_type_t type)
1281 1281 {
1282 1282 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1283 1283
1284 1284 switch (type) {
1285 1285 case ARC_SPACE_DATA:
1286 1286 ARCSTAT_INCR(arcstat_data_size, -space);
1287 1287 break;
1288 1288 case ARC_SPACE_OTHER:
1289 1289 ARCSTAT_INCR(arcstat_other_size, -space);
1290 1290 break;
1291 1291 case ARC_SPACE_HDRS:
1292 1292 ARCSTAT_INCR(arcstat_hdr_size, -space);
1293 1293 break;
1294 1294 case ARC_SPACE_L2HDRS:
1295 1295 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1296 1296 break;
1297 1297 }
1298 1298
1299 1299 ASSERT(arc_meta_used >= space);
1300 1300 if (arc_meta_max < arc_meta_used)
1301 1301 arc_meta_max = arc_meta_used;
1302 1302 ARCSTAT_INCR(arcstat_meta_used, -space);
1303 1303 ASSERT(arc_size >= space);
1304 1304 atomic_add_64(&arc_size, -space);
1305 1305 }
1306 1306
1307 1307 void *
1308 1308 arc_data_buf_alloc(uint64_t size)
1309 1309 {
1310 1310 if (arc_evict_needed(ARC_BUFC_DATA))
1311 1311 cv_signal(&arc_reclaim_thr_cv);
1312 1312 atomic_add_64(&arc_size, size);
1313 1313 return (zio_data_buf_alloc(size));
1314 1314 }
1315 1315
1316 1316 void
1317 1317 arc_data_buf_free(void *buf, uint64_t size)
1318 1318 {
1319 1319 zio_data_buf_free(buf, size);
1320 1320 ASSERT(arc_size >= size);
1321 1321 atomic_add_64(&arc_size, -size);
1322 1322 }
1323 1323
1324 1324 arc_buf_t *
1325 1325 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1326 1326 {
1327 1327 arc_buf_hdr_t *hdr;
1328 1328 arc_buf_t *buf;
1329 1329
1330 1330 ASSERT3U(size, >, 0);
1331 1331 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1332 1332 ASSERT(BUF_EMPTY(hdr));
1333 1333 hdr->b_size = size;
1334 1334 hdr->b_type = type;
1335 1335 hdr->b_spa = spa_load_guid(spa);
1336 1336 hdr->b_state = arc_anon;
1337 1337 hdr->b_arc_access = 0;
1338 1338 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1339 1339 buf->b_hdr = hdr;
1340 1340 buf->b_data = NULL;
1341 1341 buf->b_efunc = NULL;
1342 1342 buf->b_private = NULL;
1343 1343 buf->b_next = NULL;
1344 1344 hdr->b_buf = buf;
1345 1345 arc_get_data_buf(buf);
1346 1346 hdr->b_datacnt = 1;
1347 1347 hdr->b_flags = 0;
1348 1348 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1349 1349 (void) refcount_add(&hdr->b_refcnt, tag);
1350 1350
1351 1351 return (buf);
1352 1352 }
1353 1353
1354 1354 static char *arc_onloan_tag = "onloan";
1355 1355
1356 1356 /*
1357 1357 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1358 1358 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1359 1359 * buffers must be returned to the arc before they can be used by the DMU or
1360 1360 * freed.
1361 1361 */
1362 1362 arc_buf_t *
1363 1363 arc_loan_buf(spa_t *spa, int size)
1364 1364 {
1365 1365 arc_buf_t *buf;
1366 1366
1367 1367 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1368 1368
1369 1369 atomic_add_64(&arc_loaned_bytes, size);
1370 1370 return (buf);
1371 1371 }
1372 1372
1373 1373 /*
1374 1374 * Return a loaned arc buffer to the arc.
1375 1375 */
1376 1376 void
1377 1377 arc_return_buf(arc_buf_t *buf, void *tag)
1378 1378 {
1379 1379 arc_buf_hdr_t *hdr = buf->b_hdr;
1380 1380
1381 1381 ASSERT(buf->b_data != NULL);
1382 1382 (void) refcount_add(&hdr->b_refcnt, tag);
1383 1383 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1384 1384
1385 1385 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1386 1386 }
1387 1387
1388 1388 /* Detach an arc_buf from a dbuf (tag) */
1389 1389 void
1390 1390 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1391 1391 {
1392 1392 arc_buf_hdr_t *hdr;
1393 1393
1394 1394 ASSERT(buf->b_data != NULL);
1395 1395 hdr = buf->b_hdr;
1396 1396 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1397 1397 (void) refcount_remove(&hdr->b_refcnt, tag);
1398 1398 buf->b_efunc = NULL;
1399 1399 buf->b_private = NULL;
1400 1400
1401 1401 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1402 1402 }
1403 1403
1404 1404 static arc_buf_t *
1405 1405 arc_buf_clone(arc_buf_t *from)
1406 1406 {
1407 1407 arc_buf_t *buf;
1408 1408 arc_buf_hdr_t *hdr = from->b_hdr;
1409 1409 uint64_t size = hdr->b_size;
1410 1410
1411 1411 ASSERT(hdr->b_state != arc_anon);
1412 1412
1413 1413 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1414 1414 buf->b_hdr = hdr;
1415 1415 buf->b_data = NULL;
1416 1416 buf->b_efunc = NULL;
1417 1417 buf->b_private = NULL;
1418 1418 buf->b_next = hdr->b_buf;
1419 1419 hdr->b_buf = buf;
1420 1420 arc_get_data_buf(buf);
1421 1421 bcopy(from->b_data, buf->b_data, size);
1422 1422
1423 1423 /*
1424 1424 * This buffer already exists in the arc so create a duplicate
1425 1425 * copy for the caller. If the buffer is associated with user data
1426 1426 * then track the size and number of duplicates. These stats will be
1427 1427 * updated as duplicate buffers are created and destroyed.
1428 1428 */
1429 1429 if (hdr->b_type == ARC_BUFC_DATA) {
1430 1430 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1431 1431 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1432 1432 }
1433 1433 hdr->b_datacnt += 1;
1434 1434 return (buf);
1435 1435 }
1436 1436
1437 1437 void
1438 1438 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1439 1439 {
1440 1440 arc_buf_hdr_t *hdr;
1441 1441 kmutex_t *hash_lock;
1442 1442
1443 1443 /*
1444 1444 * Check to see if this buffer is evicted. Callers
1445 1445 * must verify b_data != NULL to know if the add_ref
1446 1446 * was successful.
1447 1447 */
1448 1448 mutex_enter(&buf->b_evict_lock);
1449 1449 if (buf->b_data == NULL) {
1450 1450 mutex_exit(&buf->b_evict_lock);
1451 1451 return;
1452 1452 }
1453 1453 hash_lock = HDR_LOCK(buf->b_hdr);
1454 1454 mutex_enter(hash_lock);
1455 1455 hdr = buf->b_hdr;
1456 1456 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1457 1457 mutex_exit(&buf->b_evict_lock);
1458 1458
1459 1459 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1460 1460 add_reference(hdr, hash_lock, tag);
1461 1461 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1462 1462 arc_access(hdr, hash_lock);
1463 1463 mutex_exit(hash_lock);
1464 1464 ARCSTAT_BUMP(arcstat_hits);
1465 1465 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1466 1466 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1467 1467 data, metadata, hits);
1468 1468 }
1469 1469
1470 1470 /*
1471 1471 * Free the arc data buffer. If it is an l2arc write in progress,
1472 1472 * the buffer is placed on l2arc_free_on_write to be freed later.
1473 1473 */
1474 1474 static void
1475 1475 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1476 1476 {
1477 1477 arc_buf_hdr_t *hdr = buf->b_hdr;
1478 1478
1479 1479 if (HDR_L2_WRITING(hdr)) {
1480 1480 l2arc_data_free_t *df;
1481 1481 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1482 1482 df->l2df_data = buf->b_data;
1483 1483 df->l2df_size = hdr->b_size;
1484 1484 df->l2df_func = free_func;
1485 1485 mutex_enter(&l2arc_free_on_write_mtx);
1486 1486 list_insert_head(l2arc_free_on_write, df);
1487 1487 mutex_exit(&l2arc_free_on_write_mtx);
1488 1488 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1489 1489 } else {
1490 1490 free_func(buf->b_data, hdr->b_size);
1491 1491 }
1492 1492 }
1493 1493
1494 1494 static void
1495 1495 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1496 1496 {
1497 1497 arc_buf_t **bufp;
1498 1498
1499 1499 /* free up data associated with the buf */
1500 1500 if (buf->b_data) {
1501 1501 arc_state_t *state = buf->b_hdr->b_state;
1502 1502 uint64_t size = buf->b_hdr->b_size;
1503 1503 arc_buf_contents_t type = buf->b_hdr->b_type;
1504 1504
1505 1505 arc_cksum_verify(buf);
1506 1506 arc_buf_unwatch(buf);
1507 1507
1508 1508 if (!recycle) {
1509 1509 if (type == ARC_BUFC_METADATA) {
1510 1510 arc_buf_data_free(buf, zio_buf_free);
1511 1511 arc_space_return(size, ARC_SPACE_DATA);
1512 1512 } else {
1513 1513 ASSERT(type == ARC_BUFC_DATA);
1514 1514 arc_buf_data_free(buf, zio_data_buf_free);
1515 1515 ARCSTAT_INCR(arcstat_data_size, -size);
1516 1516 atomic_add_64(&arc_size, -size);
1517 1517 }
1518 1518 }
1519 1519 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1520 1520 uint64_t *cnt = &state->arcs_lsize[type];
1521 1521
1522 1522 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1523 1523 ASSERT(state != arc_anon);
1524 1524
1525 1525 ASSERT3U(*cnt, >=, size);
1526 1526 atomic_add_64(cnt, -size);
1527 1527 }
1528 1528 ASSERT3U(state->arcs_size, >=, size);
1529 1529 atomic_add_64(&state->arcs_size, -size);
1530 1530 buf->b_data = NULL;
1531 1531
1532 1532 /*
1533 1533 * If we're destroying a duplicate buffer make sure
1534 1534 * that the appropriate statistics are updated.
1535 1535 */
1536 1536 if (buf->b_hdr->b_datacnt > 1 &&
1537 1537 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1538 1538 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1539 1539 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1540 1540 }
1541 1541 ASSERT(buf->b_hdr->b_datacnt > 0);
1542 1542 buf->b_hdr->b_datacnt -= 1;
1543 1543 }
1544 1544
1545 1545 /* only remove the buf if requested */
1546 1546 if (!all)
1547 1547 return;
1548 1548
1549 1549 /* remove the buf from the hdr list */
1550 1550 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1551 1551 continue;
1552 1552 *bufp = buf->b_next;
1553 1553 buf->b_next = NULL;
1554 1554
1555 1555 ASSERT(buf->b_efunc == NULL);
1556 1556
1557 1557 /* clean up the buf */
1558 1558 buf->b_hdr = NULL;
1559 1559 kmem_cache_free(buf_cache, buf);
1560 1560 }
1561 1561
1562 1562 static void
1563 1563 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1564 1564 {
1565 1565 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1566 1566 ASSERT3P(hdr->b_state, ==, arc_anon);
1567 1567 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1568 1568 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1569 1569
1570 1570 if (l2hdr != NULL) {
1571 1571 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1572 1572 /*
1573 1573 * To prevent arc_free() and l2arc_evict() from
1574 1574 * attempting to free the same buffer at the same time,
1575 1575 * a FREE_IN_PROGRESS flag is given to arc_free() to
1576 1576 * give it priority. l2arc_evict() can't destroy this
1577 1577 * header while we are waiting on l2arc_buflist_mtx.
1578 1578 *
1579 1579 * The hdr may be removed from l2ad_buflist before we
1580 1580 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1581 1581 */
1582 1582 if (!buflist_held) {
1583 1583 mutex_enter(&l2arc_buflist_mtx);
1584 1584 l2hdr = hdr->b_l2hdr;
1585 1585 }
1586 1586
1587 1587 if (l2hdr != NULL) {
1588 1588 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1589 1589 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1590 1590 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1591 1591 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1592 1592 if (hdr->b_state == arc_l2c_only)
1593 1593 l2arc_hdr_stat_remove();
1594 1594 hdr->b_l2hdr = NULL;
1595 1595 }
1596 1596
1597 1597 if (!buflist_held)
1598 1598 mutex_exit(&l2arc_buflist_mtx);
1599 1599 }
1600 1600
1601 1601 if (!BUF_EMPTY(hdr)) {
1602 1602 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1603 1603 buf_discard_identity(hdr);
1604 1604 }
1605 1605 while (hdr->b_buf) {
1606 1606 arc_buf_t *buf = hdr->b_buf;
1607 1607
1608 1608 if (buf->b_efunc) {
1609 1609 mutex_enter(&arc_eviction_mtx);
1610 1610 mutex_enter(&buf->b_evict_lock);
1611 1611 ASSERT(buf->b_hdr != NULL);
1612 1612 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1613 1613 hdr->b_buf = buf->b_next;
1614 1614 buf->b_hdr = &arc_eviction_hdr;
1615 1615 buf->b_next = arc_eviction_list;
1616 1616 arc_eviction_list = buf;
1617 1617 mutex_exit(&buf->b_evict_lock);
1618 1618 mutex_exit(&arc_eviction_mtx);
1619 1619 } else {
1620 1620 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1621 1621 }
1622 1622 }
1623 1623 if (hdr->b_freeze_cksum != NULL) {
1624 1624 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1625 1625 hdr->b_freeze_cksum = NULL;
1626 1626 }
1627 1627 if (hdr->b_thawed) {
1628 1628 kmem_free(hdr->b_thawed, 1);
1629 1629 hdr->b_thawed = NULL;
1630 1630 }
1631 1631
1632 1632 ASSERT(!list_link_active(&hdr->b_arc_node));
1633 1633 ASSERT3P(hdr->b_hash_next, ==, NULL);
1634 1634 ASSERT3P(hdr->b_acb, ==, NULL);
1635 1635 kmem_cache_free(hdr_cache, hdr);
1636 1636 }
1637 1637
1638 1638 void
1639 1639 arc_buf_free(arc_buf_t *buf, void *tag)
1640 1640 {
1641 1641 arc_buf_hdr_t *hdr = buf->b_hdr;
1642 1642 int hashed = hdr->b_state != arc_anon;
1643 1643
1644 1644 ASSERT(buf->b_efunc == NULL);
1645 1645 ASSERT(buf->b_data != NULL);
1646 1646
1647 1647 if (hashed) {
1648 1648 kmutex_t *hash_lock = HDR_LOCK(hdr);
1649 1649
1650 1650 mutex_enter(hash_lock);
1651 1651 hdr = buf->b_hdr;
1652 1652 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1653 1653
1654 1654 (void) remove_reference(hdr, hash_lock, tag);
1655 1655 if (hdr->b_datacnt > 1) {
1656 1656 arc_buf_destroy(buf, FALSE, TRUE);
1657 1657 } else {
1658 1658 ASSERT(buf == hdr->b_buf);
1659 1659 ASSERT(buf->b_efunc == NULL);
1660 1660 hdr->b_flags |= ARC_BUF_AVAILABLE;
1661 1661 }
1662 1662 mutex_exit(hash_lock);
1663 1663 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1664 1664 int destroy_hdr;
1665 1665 /*
1666 1666 * We are in the middle of an async write. Don't destroy
1667 1667 * this buffer unless the write completes before we finish
1668 1668 * decrementing the reference count.
1669 1669 */
1670 1670 mutex_enter(&arc_eviction_mtx);
1671 1671 (void) remove_reference(hdr, NULL, tag);
1672 1672 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1673 1673 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1674 1674 mutex_exit(&arc_eviction_mtx);
1675 1675 if (destroy_hdr)
1676 1676 arc_hdr_destroy(hdr);
1677 1677 } else {
1678 1678 if (remove_reference(hdr, NULL, tag) > 0)
1679 1679 arc_buf_destroy(buf, FALSE, TRUE);
1680 1680 else
1681 1681 arc_hdr_destroy(hdr);
1682 1682 }
1683 1683 }
1684 1684
1685 1685 boolean_t
1686 1686 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1687 1687 {
1688 1688 arc_buf_hdr_t *hdr = buf->b_hdr;
1689 1689 kmutex_t *hash_lock = HDR_LOCK(hdr);
1690 1690 boolean_t no_callback = (buf->b_efunc == NULL);
1691 1691
1692 1692 if (hdr->b_state == arc_anon) {
1693 1693 ASSERT(hdr->b_datacnt == 1);
1694 1694 arc_buf_free(buf, tag);
1695 1695 return (no_callback);
1696 1696 }
1697 1697
1698 1698 mutex_enter(hash_lock);
1699 1699 hdr = buf->b_hdr;
1700 1700 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1701 1701 ASSERT(hdr->b_state != arc_anon);
1702 1702 ASSERT(buf->b_data != NULL);
1703 1703
1704 1704 (void) remove_reference(hdr, hash_lock, tag);
1705 1705 if (hdr->b_datacnt > 1) {
1706 1706 if (no_callback)
1707 1707 arc_buf_destroy(buf, FALSE, TRUE);
1708 1708 } else if (no_callback) {
1709 1709 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1710 1710 ASSERT(buf->b_efunc == NULL);
1711 1711 hdr->b_flags |= ARC_BUF_AVAILABLE;
1712 1712 }
1713 1713 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1714 1714 refcount_is_zero(&hdr->b_refcnt));
1715 1715 mutex_exit(hash_lock);
1716 1716 return (no_callback);
1717 1717 }
1718 1718
1719 1719 int
1720 1720 arc_buf_size(arc_buf_t *buf)
1721 1721 {
1722 1722 return (buf->b_hdr->b_size);
1723 1723 }
1724 1724
1725 1725 /*
1726 1726 * Called from the DMU to determine if the current buffer should be
1727 1727 * evicted. In order to ensure proper locking, the eviction must be initiated
1728 1728 * from the DMU. Return true if the buffer is associated with user data and
1729 1729 * duplicate buffers still exist.
1730 1730 */
1731 1731 boolean_t
1732 1732 arc_buf_eviction_needed(arc_buf_t *buf)
1733 1733 {
1734 1734 arc_buf_hdr_t *hdr;
1735 1735 boolean_t evict_needed = B_FALSE;
1736 1736
1737 1737 if (zfs_disable_dup_eviction)
1738 1738 return (B_FALSE);
1739 1739
1740 1740 mutex_enter(&buf->b_evict_lock);
1741 1741 hdr = buf->b_hdr;
1742 1742 if (hdr == NULL) {
1743 1743 /*
1744 1744 * We are in arc_do_user_evicts(); let that function
1745 1745 * perform the eviction.
1746 1746 */
1747 1747 ASSERT(buf->b_data == NULL);
1748 1748 mutex_exit(&buf->b_evict_lock);
1749 1749 return (B_FALSE);
1750 1750 } else if (buf->b_data == NULL) {
1751 1751 /*
1752 1752 * We have already been added to the arc eviction list;
1753 1753 * recommend eviction.
1754 1754 */
1755 1755 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1756 1756 mutex_exit(&buf->b_evict_lock);
1757 1757 return (B_TRUE);
1758 1758 }
1759 1759
1760 1760 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1761 1761 evict_needed = B_TRUE;
1762 1762
1763 1763 mutex_exit(&buf->b_evict_lock);
1764 1764 return (evict_needed);
1765 1765 }
1766 1766
1767 1767 /*
1768 1768 * Evict buffers from list until we've removed the specified number of
1769 1769 * bytes. Move the removed buffers to the appropriate evict state.
1770 1770 * If the recycle flag is set, then attempt to "recycle" a buffer:
1771 1771 * - look for a buffer to evict that is `bytes' long.
1772 1772 * - return the data block from this buffer rather than freeing it.
1773 1773 * This flag is used by callers that are trying to make space for a
1774 1774 * new buffer in a full arc cache.
1775 1775 *
1776 1776 * This function makes a "best effort". It skips over any buffers
1777 1777 * it can't get a hash_lock on, and so may not catch all candidates.
1778 1778 * It may also return without evicting as much space as requested.
1779 1779 */
1780 1780 static void *
1781 1781 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1782 1782 arc_buf_contents_t type)
1783 1783 {
1784 1784 arc_state_t *evicted_state;
1785 1785 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1786 1786 arc_buf_hdr_t *ab, *ab_prev = NULL;
1787 1787 list_t *list = &state->arcs_list[type];
1788 1788 kmutex_t *hash_lock;
1789 1789 boolean_t have_lock;
1790 1790 void *stolen = NULL;
1791 1791 arc_buf_hdr_t marker = { 0 };
1792 1792 int count = 0;
1793 1793
1794 1794 ASSERT(state == arc_mru || state == arc_mfu);
1795 1795
1796 1796 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1797 1797
1798 1798 mutex_enter(&state->arcs_mtx);
1799 1799 mutex_enter(&evicted_state->arcs_mtx);
1800 1800
1801 1801 for (ab = list_tail(list); ab; ab = ab_prev) {
1802 1802 ab_prev = list_prev(list, ab);
1803 1803 /* prefetch buffers have a minimum lifespan */
1804 1804 if (HDR_IO_IN_PROGRESS(ab) ||
1805 1805 (spa && ab->b_spa != spa) ||
1806 1806 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1807 1807 ddi_get_lbolt() - ab->b_arc_access <
1808 1808 arc_min_prefetch_lifespan)) {
1809 1809 skipped++;
1810 1810 continue;
1811 1811 }
1812 1812 /* "lookahead" for better eviction candidate */
1813 1813 if (recycle && ab->b_size != bytes &&
1814 1814 ab_prev && ab_prev->b_size == bytes)
1815 1815 continue;
1816 1816
1817 1817 /* ignore markers */
1818 1818 if (ab->b_spa == 0)
1819 1819 continue;
1820 1820
1821 1821 /*
1822 1822 * It may take a long time to evict all the bufs requested.
1823 1823 * To avoid blocking all arc activity, periodically drop
1824 1824 * the arcs_mtx and give other threads a chance to run
1825 1825 * before reacquiring the lock.
1826 1826 *
1827 1827 * If we are looking for a buffer to recycle, we are in
1828 1828 * the hot code path, so don't sleep.
1829 1829 */
1830 1830 if (!recycle && count++ > arc_evict_iterations) {
1831 1831 list_insert_after(list, ab, &marker);
1832 1832 mutex_exit(&evicted_state->arcs_mtx);
1833 1833 mutex_exit(&state->arcs_mtx);
1834 1834 kpreempt(KPREEMPT_SYNC);
1835 1835 mutex_enter(&state->arcs_mtx);
1836 1836 mutex_enter(&evicted_state->arcs_mtx);
1837 1837 ab_prev = list_prev(list, &marker);
1838 1838 list_remove(list, &marker);
1839 1839 count = 0;
1840 1840 continue;
1841 1841 }
1842 1842
1843 1843 hash_lock = HDR_LOCK(ab);
1844 1844 have_lock = MUTEX_HELD(hash_lock);
1845 1845 if (have_lock || mutex_tryenter(hash_lock)) {
1846 1846 ASSERT0(refcount_count(&ab->b_refcnt));
1847 1847 ASSERT(ab->b_datacnt > 0);
1848 1848 while (ab->b_buf) {
1849 1849 arc_buf_t *buf = ab->b_buf;
1850 1850 if (!mutex_tryenter(&buf->b_evict_lock)) {
1851 1851 missed += 1;
1852 1852 break;
1853 1853 }
1854 1854 if (buf->b_data) {
1855 1855 bytes_evicted += ab->b_size;
1856 1856 if (recycle && ab->b_type == type &&
1857 1857 ab->b_size == bytes &&
1858 1858 !HDR_L2_WRITING(ab)) {
1859 1859 stolen = buf->b_data;
1860 1860 recycle = FALSE;
1861 1861 }
1862 1862 }
1863 1863 if (buf->b_efunc) {
1864 1864 mutex_enter(&arc_eviction_mtx);
1865 1865 arc_buf_destroy(buf,
1866 1866 buf->b_data == stolen, FALSE);
1867 1867 ab->b_buf = buf->b_next;
1868 1868 buf->b_hdr = &arc_eviction_hdr;
1869 1869 buf->b_next = arc_eviction_list;
1870 1870 arc_eviction_list = buf;
1871 1871 mutex_exit(&arc_eviction_mtx);
1872 1872 mutex_exit(&buf->b_evict_lock);
1873 1873 } else {
1874 1874 mutex_exit(&buf->b_evict_lock);
1875 1875 arc_buf_destroy(buf,
1876 1876 buf->b_data == stolen, TRUE);
1877 1877 }
1878 1878 }
1879 1879
1880 1880 if (ab->b_l2hdr) {
1881 1881 ARCSTAT_INCR(arcstat_evict_l2_cached,
1882 1882 ab->b_size);
1883 1883 } else {
1884 1884 if (l2arc_write_eligible(ab->b_spa, ab)) {
1885 1885 ARCSTAT_INCR(arcstat_evict_l2_eligible,
1886 1886 ab->b_size);
1887 1887 } else {
1888 1888 ARCSTAT_INCR(
1889 1889 arcstat_evict_l2_ineligible,
1890 1890 ab->b_size);
1891 1891 }
1892 1892 }
1893 1893
1894 1894 if (ab->b_datacnt == 0) {
1895 1895 arc_change_state(evicted_state, ab, hash_lock);
1896 1896 ASSERT(HDR_IN_HASH_TABLE(ab));
1897 1897 ab->b_flags |= ARC_IN_HASH_TABLE;
1898 1898 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1899 1899 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1900 1900 }
1901 1901 if (!have_lock)
1902 1902 mutex_exit(hash_lock);
1903 1903 if (bytes >= 0 && bytes_evicted >= bytes)
1904 1904 break;
1905 1905 } else {
1906 1906 missed += 1;
1907 1907 }
1908 1908 }
1909 1909
1910 1910 mutex_exit(&evicted_state->arcs_mtx);
1911 1911 mutex_exit(&state->arcs_mtx);
1912 1912
1913 1913 if (bytes_evicted < bytes)
1914 1914 		dprintf("only evicted %lld bytes from %p",
1915 1915 (longlong_t)bytes_evicted, state);
1916 1916
1917 1917 if (skipped)
1918 1918 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1919 1919
1920 1920 if (missed)
1921 1921 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1922 1922
1923 1923 /*
1924 1924 * Note: we have just evicted some data into the ghost state,
1925 1925 * potentially putting the ghost size over the desired size. Rather
1926 1926 	 * than evicting from the ghost list in this hot code path, leave
1927 1927 * this chore to the arc_reclaim_thread().
1928 1928 */
1929 1929
1930 1930 return (stolen);
1931 1931 }
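
/*
 * A minimal sketch of how the "recycle" contract above reads from the
 * caller's side (arc_get_data_buf(), further down in this file, is the real
 * caller): ask arc_evict() to steal a block of exactly `size' bytes and fall
 * back to a fresh allocation when nothing suitable was recycled. The helper
 * name example_steal_or_alloc() is hypothetical.
 */
static void *
example_steal_or_alloc(arc_state_t *state, uint64_t size,
    arc_buf_contents_t type)
{
	void *data = arc_evict(state, NULL, size, TRUE, type);

	if (data == NULL)	/* best effort: recycling may find nothing */
		data = zio_data_buf_alloc(size);
	return (data);
}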
1932 1932
1933 1933 /*
1934 1934 * Remove buffers from list until we've removed the specified number of
1935 1935 * bytes. Destroy the buffers that are removed.
1936 1936 */
1937 1937 static void
1938 1938 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1939 1939 {
1940 1940 arc_buf_hdr_t *ab, *ab_prev;
1941 1941 arc_buf_hdr_t marker = { 0 };
1942 1942 list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1943 1943 kmutex_t *hash_lock;
1944 1944 uint64_t bytes_deleted = 0;
1945 1945 uint64_t bufs_skipped = 0;
1946 1946 int count = 0;
1947 1947
1948 1948 ASSERT(GHOST_STATE(state));
1949 1949 top:
1950 1950 mutex_enter(&state->arcs_mtx);
1951 1951 for (ab = list_tail(list); ab; ab = ab_prev) {
1952 1952 ab_prev = list_prev(list, ab);
1953 1953 if (ab->b_type > ARC_BUFC_NUMTYPES)
1954 1954 panic("invalid ab=%p", (void *)ab);
1955 1955 if (spa && ab->b_spa != spa)
1956 1956 continue;
1957 1957
1958 1958 /* ignore markers */
1959 1959 if (ab->b_spa == 0)
1960 1960 continue;
1961 1961
1962 1962 hash_lock = HDR_LOCK(ab);
1963 1963 /* caller may be trying to modify this buffer, skip it */
1964 1964 if (MUTEX_HELD(hash_lock))
1965 1965 continue;
1966 1966
1967 1967 /*
1968 1968 * It may take a long time to evict all the bufs requested.
1969 1969 * To avoid blocking all arc activity, periodically drop
1970 1970 * the arcs_mtx and give other threads a chance to run
1971 1971 * before reacquiring the lock.
1972 1972 */
1973 1973 if (count++ > arc_evict_iterations) {
1974 1974 list_insert_after(list, ab, &marker);
1975 1975 mutex_exit(&state->arcs_mtx);
1976 1976 kpreempt(KPREEMPT_SYNC);
1977 1977 mutex_enter(&state->arcs_mtx);
1978 1978 ab_prev = list_prev(list, &marker);
1979 1979 list_remove(list, &marker);
1980 1980 count = 0;
1981 1981 continue;
1982 1982 }
1983 1983 if (mutex_tryenter(hash_lock)) {
1984 1984 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1985 1985 ASSERT(ab->b_buf == NULL);
1986 1986 ARCSTAT_BUMP(arcstat_deleted);
1987 1987 bytes_deleted += ab->b_size;
1988 1988
1989 1989 if (ab->b_l2hdr != NULL) {
1990 1990 /*
1991 1991 * This buffer is cached on the 2nd Level ARC;
1992 1992 * don't destroy the header.
1993 1993 */
1994 1994 arc_change_state(arc_l2c_only, ab, hash_lock);
1995 1995 mutex_exit(hash_lock);
1996 1996 } else {
1997 1997 arc_change_state(arc_anon, ab, hash_lock);
1998 1998 mutex_exit(hash_lock);
1999 1999 arc_hdr_destroy(ab);
2000 2000 }
2001 2001
2002 2002 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2003 2003 if (bytes >= 0 && bytes_deleted >= bytes)
2004 2004 break;
2005 2005 } else if (bytes < 0) {
2006 2006 /*
2007 2007 * Insert a list marker and then wait for the
2008 2008 			 * hash lock to become available. Once it's
2009 2009 * available, restart from where we left off.
2010 2010 */
2011 2011 list_insert_after(list, ab, &marker);
2012 2012 mutex_exit(&state->arcs_mtx);
2013 2013 mutex_enter(hash_lock);
2014 2014 mutex_exit(hash_lock);
2015 2015 mutex_enter(&state->arcs_mtx);
2016 2016 ab_prev = list_prev(list, &marker);
2017 2017 list_remove(list, &marker);
2018 2018 } else {
2019 2019 bufs_skipped += 1;
2020 2020 }
2021 2021
2022 2022 }
2023 2023 mutex_exit(&state->arcs_mtx);
2024 2024
2025 2025 if (list == &state->arcs_list[ARC_BUFC_DATA] &&
2026 2026 (bytes < 0 || bytes_deleted < bytes)) {
2027 2027 list = &state->arcs_list[ARC_BUFC_METADATA];
2028 2028 goto top;
2029 2029 }
2030 2030
2031 2031 if (bufs_skipped) {
2032 2032 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2033 2033 ASSERT(bytes >= 0);
2034 2034 }
2035 2035
2036 2036 if (bytes_deleted < bytes)
2037 2037 dprintf("only deleted %lld bytes from %p",
2038 2038 (longlong_t)bytes_deleted, state);
2039 2039 }
2040 2040
2041 2041 static void
2042 2042 arc_adjust(void)
2043 2043 {
2044 2044 int64_t adjustment, delta;
2045 2045
2046 2046 /*
2047 2047 * Adjust MRU size
2048 2048 */
2049 2049
2050 2050 adjustment = MIN((int64_t)(arc_size - arc_c),
2051 2051 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2052 2052 arc_p));
2053 2053
2054 2054 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2055 2055 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2056 2056 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
2057 2057 adjustment -= delta;
2058 2058 }
2059 2059
2060 2060 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2061 2061 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2062 2062 (void) arc_evict(arc_mru, NULL, delta, FALSE,
2063 2063 ARC_BUFC_METADATA);
2064 2064 }
2065 2065
2066 2066 /*
2067 2067 * Adjust MFU size
2068 2068 */
2069 2069
2070 2070 adjustment = arc_size - arc_c;
2071 2071
2072 2072 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2073 2073 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2074 2074 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
2075 2075 adjustment -= delta;
2076 2076 }
2077 2077
2078 2078 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2079 2079 int64_t delta = MIN(adjustment,
2080 2080 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2081 2081 (void) arc_evict(arc_mfu, NULL, delta, FALSE,
2082 2082 ARC_BUFC_METADATA);
2083 2083 }
2084 2084
2085 2085 /*
2086 2086 * Adjust ghost lists
2087 2087 */
2088 2088
2089 2089 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2090 2090
2091 2091 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2092 2092 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2093 2093 arc_evict_ghost(arc_mru_ghost, NULL, delta);
2094 2094 }
2095 2095
2096 2096 adjustment =
2097 2097 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2098 2098
2099 2099 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2100 2100 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2101 2101 arc_evict_ghost(arc_mfu_ghost, NULL, delta);
2102 2102 }
2103 2103 }
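
/*
 * A worked example of the MRU adjustment above, using hypothetical sizes:
 * arc_size = 10 GB, arc_c = 8 GB, arc_p = 4 GB, and
 * arc_anon + arc_mru + arc_meta_used = 7 GB. Then
 *
 *	adjustment = MIN(10 GB - 8 GB, 7 GB - 4 GB) = 2 GB,
 *
 * so up to 2 GB is evicted from the MRU data list first and any remainder
 * comes out of MRU metadata; the MFU and ghost lists are then trimmed the
 * same way against their own targets. The numbers are illustrative only.
 */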
2104 2104
2105 2105 static void
2106 2106 arc_do_user_evicts(void)
2107 2107 {
2108 2108 mutex_enter(&arc_eviction_mtx);
2109 2109 while (arc_eviction_list != NULL) {
2110 2110 arc_buf_t *buf = arc_eviction_list;
2111 2111 arc_eviction_list = buf->b_next;
2112 2112 mutex_enter(&buf->b_evict_lock);
2113 2113 buf->b_hdr = NULL;
2114 2114 mutex_exit(&buf->b_evict_lock);
2115 2115 mutex_exit(&arc_eviction_mtx);
2116 2116
2117 2117 if (buf->b_efunc != NULL)
2118 2118 VERIFY(buf->b_efunc(buf) == 0);
2119 2119
2120 2120 buf->b_efunc = NULL;
2121 2121 buf->b_private = NULL;
2122 2122 kmem_cache_free(buf_cache, buf);
2123 2123 mutex_enter(&arc_eviction_mtx);
2124 2124 }
2125 2125 mutex_exit(&arc_eviction_mtx);
2126 2126 }
2127 2127
2128 2128 /*
2129 2129 * Flush all *evictable* data from the cache for the given spa.
2130 2130 * NOTE: this will not touch "active" (i.e. referenced) data.
2131 2131 */
2132 2132 void
2133 2133 arc_flush(spa_t *spa)
2134 2134 {
2135 2135 uint64_t guid = 0;
2136 2136
2137 2137 if (spa)
2138 2138 guid = spa_load_guid(spa);
2139 2139
2140 2140 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
2141 2141 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2142 2142 if (spa)
2143 2143 break;
2144 2144 }
2145 2145 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
2146 2146 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2147 2147 if (spa)
2148 2148 break;
2149 2149 }
2150 2150 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
2151 2151 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2152 2152 if (spa)
2153 2153 break;
2154 2154 }
2155 2155 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
2156 2156 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2157 2157 if (spa)
2158 2158 break;
2159 2159 }
2160 2160
2161 2161 arc_evict_ghost(arc_mru_ghost, guid, -1);
2162 2162 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2163 2163
2164 2164 mutex_enter(&arc_reclaim_thr_lock);
2165 2165 arc_do_user_evicts();
2166 2166 mutex_exit(&arc_reclaim_thr_lock);
2167 2167 ASSERT(spa || arc_eviction_list == NULL);
2168 2168 }
2169 2169
2170 2170 void
2171 2171 arc_shrink(void)
2172 2172 {
2173 2173 if (arc_c > arc_c_min) {
2174 2174 uint64_t to_free;
2175 2175
2176 2176 #ifdef _KERNEL
2177 2177 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
2178 2178 #else
2179 2179 to_free = arc_c >> arc_shrink_shift;
2180 2180 #endif
2181 2181 if (arc_c > arc_c_min + to_free)
2182 2182 atomic_add_64(&arc_c, -to_free);
2183 2183 else
2184 2184 arc_c = arc_c_min;
2185 2185
2186 2186 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2187 2187 if (arc_c > arc_size)
2188 2188 arc_c = MAX(arc_size, arc_c_min);
2189 2189 if (arc_p > arc_c)
2190 2190 arc_p = (arc_c >> 1);
2191 2191 ASSERT(arc_c >= arc_c_min);
2192 2192 ASSERT((int64_t)arc_p >= 0);
2193 2193 }
2194 2194
2195 2195 if (arc_size > arc_c)
2196 2196 arc_adjust();
2197 2197 }
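
/*
 * A worked example of the shrink arithmetic above, assuming a hypothetical
 * arc_c of 8 GB and an arc_shrink_shift of 5: to_free = 8 GB >> 5 = 256 MB,
 * so arc_c drops to 7.75 GB (never below arc_c_min), and arc_p is reduced by
 * the same 1/32nd fraction of its own value (and reset to arc_c / 2 if it
 * still exceeds the new target).
 */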
2198 2198
2199 2199 /*
2200 2200 * Determine if the system is under memory pressure and is asking
2201 2201 * to reclaim memory. A return value of 1 indicates that the system
2202 2202 * is under memory pressure and that the arc should adjust accordingly.
2203 2203 */
2204 2204 static int
2205 2205 arc_reclaim_needed(void)
2206 2206 {
2207 2207 uint64_t extra;
2208 2208
2209 2209 #ifdef _KERNEL
2210 2210
2211 2211 if (needfree)
2212 2212 return (1);
2213 2213
2214 2214 /*
2215 2215 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2216 2216 */
2217 2217 extra = desfree;
2218 2218
2219 2219 /*
2220 2220 * check that we're out of range of the pageout scanner. It starts to
2221 2221 * schedule paging if freemem is less than lotsfree and needfree.
2222 2222 * lotsfree is the high-water mark for pageout, and needfree is the
2223 2223 * number of needed free pages. We add extra pages here to make sure
2224 2224 * the scanner doesn't start up while we're freeing memory.
2225 2225 */
2226 2226 if (freemem < lotsfree + needfree + extra)
2227 2227 return (1);
2228 2228
2229 2229 /*
2230 2230 * check to make sure that swapfs has enough space so that anon
2231 2231 * reservations can still succeed. anon_resvmem() checks that the
2232 2232 * availrmem is greater than swapfs_minfree, and the number of reserved
2233 2233 * swap pages. We also add a bit of extra here just to prevent
2234 2234 * circumstances from getting really dire.
2235 2235 */
2236 2236 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2237 2237 return (1);
2238 2238
2239 2239 /*
2240 2240 * Check that we have enough availrmem that memory locking (e.g., via
2241 2241 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
2242 2242 * stores the number of pages that cannot be locked; when availrmem
2243 2243 * drops below pages_pp_maximum, page locking mechanisms such as
2244 2244 * page_pp_lock() will fail.)
2245 2245 */
2246 2246 if (availrmem <= pages_pp_maximum)
2247 2247 return (1);
2248 2248
2249 2249 #if defined(__i386)
2250 2250 /*
2251 2251 * If we're on an i386 platform, it's possible that we'll exhaust the
2252 2252 * kernel heap space before we ever run out of available physical
2253 2253 * memory. Most checks of the size of the heap_area compare against
2254 2254 * tune.t_minarmem, which is the minimum available real memory that we
2255 2255 * can have in the system. However, this is generally fixed at 25 pages
2256 2256 * which is so low that it's useless. In this comparison, we seek to
2257 2257 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2258 2258 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2259 2259 * free)
2260 2260 */
2261 2261 if (vmem_size(heap_arena, VMEM_FREE) <
2262 2262 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2263 2263 return (1);
2264 2264 #endif
2265 2265
2266 2266 /*
2267 2267 * If zio data pages are being allocated out of a separate heap segment,
2268 2268 * then enforce that the size of available vmem for this arena remains
2269 2269 * above about 1/16th free.
2270 2270 *
2271 2271 * Note: The 1/16th arena free requirement was put in place
2272 2272 * to aggressively evict memory from the arc in order to avoid
2273 2273 * memory fragmentation issues.
2274 2274 */
2275 2275 if (zio_arena != NULL &&
2276 2276 vmem_size(zio_arena, VMEM_FREE) <
2277 2277 (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2278 2278 return (1);
2279 2279 #else
2280 2280 if (spa_get_random(100) == 0)
2281 2281 return (1);
2282 2282 #endif
2283 2283 return (0);
2284 2284 }
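
/*
 * A worked example of the pageout-scanner check above, with hypothetical
 * values: lotsfree = 16384 pages, needfree = 0 and desfree = 8192. The ARC
 * asks for reclaim as soon as freemem drops below
 * 16384 + 0 + 8192 = 24576 pages, i.e. comfortably before the scanner itself
 * would start paging. The values are illustrative only.
 */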
2285 2285
2286 2286 static void
2287 2287 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2288 2288 {
2289 2289 size_t i;
2290 2290 kmem_cache_t *prev_cache = NULL;
2291 2291 kmem_cache_t *prev_data_cache = NULL;
2292 2292 extern kmem_cache_t *zio_buf_cache[];
2293 2293 extern kmem_cache_t *zio_data_buf_cache[];
2294 2294
2295 2295 #ifdef _KERNEL
2296 2296 if (arc_meta_used >= arc_meta_limit) {
2297 2297 /*
2298 2298 * We are exceeding our meta-data cache limit.
2299 2299 * Purge some DNLC entries to release holds on meta-data.
2300 2300 */
2301 2301 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2302 2302 }
2303 2303 #if defined(__i386)
2304 2304 /*
2305 2305 * Reclaim unused memory from all kmem caches.
2306 2306 */
2307 2307 kmem_reap();
2308 2308 #endif
2309 2309 #endif
2310 2310
2311 2311 /*
2312 2312 * An aggressive reclamation will shrink the cache size as well as
2313 2313 * reap free buffers from the arc kmem caches.
2314 2314 */
2315 2315 if (strat == ARC_RECLAIM_AGGR)
2316 2316 arc_shrink();
2317 2317
2318 2318 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2319 2319 if (zio_buf_cache[i] != prev_cache) {
2320 2320 prev_cache = zio_buf_cache[i];
2321 2321 kmem_cache_reap_now(zio_buf_cache[i]);
2322 2322 }
2323 2323 if (zio_data_buf_cache[i] != prev_data_cache) {
2324 2324 prev_data_cache = zio_data_buf_cache[i];
2325 2325 kmem_cache_reap_now(zio_data_buf_cache[i]);
2326 2326 }
2327 2327 }
2328 2328 kmem_cache_reap_now(buf_cache);
2329 2329 kmem_cache_reap_now(hdr_cache);
2330 2330
2331 2331 /*
2332 2332 	 * Ask the vmem arena to reclaim unused memory from its
2333 2333 * quantum caches.
2334 2334 */
2335 2335 if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2336 2336 vmem_qcache_reap(zio_arena);
2337 2337 }
2338 2338
2339 2339 static void
2340 2340 arc_reclaim_thread(void)
2341 2341 {
2342 2342 clock_t growtime = 0;
2343 2343 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2344 2344 callb_cpr_t cpr;
2345 2345
2346 2346 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2347 2347
2348 2348 mutex_enter(&arc_reclaim_thr_lock);
2349 2349 while (arc_thread_exit == 0) {
2350 2350 if (arc_reclaim_needed()) {
2351 2351
2352 2352 if (arc_no_grow) {
2353 2353 if (last_reclaim == ARC_RECLAIM_CONS) {
2354 2354 last_reclaim = ARC_RECLAIM_AGGR;
2355 2355 } else {
2356 2356 last_reclaim = ARC_RECLAIM_CONS;
2357 2357 }
2358 2358 } else {
2359 2359 arc_no_grow = TRUE;
2360 2360 last_reclaim = ARC_RECLAIM_AGGR;
2361 2361 membar_producer();
2362 2362 }
2363 2363
2364 2364 /* reset the growth delay for every reclaim */
2365 2365 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2366 2366
2367 2367 arc_kmem_reap_now(last_reclaim);
2368 2368 arc_warm = B_TRUE;
2369 2369
2370 2370 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2371 2371 arc_no_grow = FALSE;
2372 2372 }
2373 2373
2374 2374 arc_adjust();
2375 2375
2376 2376 if (arc_eviction_list != NULL)
2377 2377 arc_do_user_evicts();
2378 2378
2379 2379 /* block until needed, or one second, whichever is shorter */
2380 2380 CALLB_CPR_SAFE_BEGIN(&cpr);
2381 2381 (void) cv_timedwait(&arc_reclaim_thr_cv,
2382 2382 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2383 2383 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2384 2384 }
2385 2385
2386 2386 arc_thread_exit = 0;
2387 2387 cv_broadcast(&arc_reclaim_thr_cv);
2388 2388 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2389 2389 thread_exit();
2390 2390 }
2391 2391
2392 2392 /*
2393 2393 * Adapt arc info given the number of bytes we are trying to add and
2394 2394  * the state that we are coming from. This function is only called
2395 2395 * when we are adding new content to the cache.
2396 2396 */
2397 2397 static void
2398 2398 arc_adapt(int bytes, arc_state_t *state)
2399 2399 {
2400 2400 int mult;
2401 2401 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2402 2402
2403 2403 if (state == arc_l2c_only)
2404 2404 return;
2405 2405
2406 2406 ASSERT(bytes > 0);
2407 2407 /*
2408 2408 * Adapt the target size of the MRU list:
2409 2409 * - if we just hit in the MRU ghost list, then increase
2410 2410 * the target size of the MRU list.
2411 2411 * - if we just hit in the MFU ghost list, then increase
2412 2412 * the target size of the MFU list by decreasing the
2413 2413 * target size of the MRU list.
2414 2414 */
2415 2415 if (state == arc_mru_ghost) {
2416 2416 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2417 2417 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2418 2418 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2419 2419
2420 2420 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2421 2421 } else if (state == arc_mfu_ghost) {
2422 2422 uint64_t delta;
2423 2423
2424 2424 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2425 2425 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2426 2426 mult = MIN(mult, 10);
2427 2427
2428 2428 delta = MIN(bytes * mult, arc_p);
2429 2429 arc_p = MAX(arc_p_min, arc_p - delta);
2430 2430 }
2431 2431 ASSERT((int64_t)arc_p >= 0);
2432 2432
2433 2433 if (arc_reclaim_needed()) {
2434 2434 cv_signal(&arc_reclaim_thr_cv);
2435 2435 return;
2436 2436 }
2437 2437
2438 2438 if (arc_no_grow)
2439 2439 return;
2440 2440
2441 2441 if (arc_c >= arc_c_max)
2442 2442 return;
2443 2443
2444 2444 /*
2445 2445 * If we're within (2 * maxblocksize) bytes of the target
2446 2446 * cache size, increment the target cache size
2447 2447 */
2448 2448 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2449 2449 atomic_add_64(&arc_c, (int64_t)bytes);
2450 2450 if (arc_c > arc_c_max)
2451 2451 arc_c = arc_c_max;
2452 2452 else if (state == arc_anon)
2453 2453 atomic_add_64(&arc_p, (int64_t)bytes);
2454 2454 if (arc_p > arc_c)
2455 2455 arc_p = arc_c;
2456 2456 }
2457 2457 ASSERT((int64_t)arc_p >= 0);
2458 2458 }
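
/*
 * A worked example of the ghost-hit adaptation above, with hypothetical
 * sizes: on an MRU-ghost hit of a 128K buffer, if the MFU ghost list is four
 * times the size of the MRU ghost list then mult = 4 and arc_p grows by
 * 4 * 128K = 512K (clamped to arc_c - arc_p_min). A hit in the MFU ghost
 * list shrinks arc_p symmetrically, never below arc_p_min.
 */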
2459 2459
2460 2460 /*
2461 2461 * Check if the cache has reached its limits and eviction is required
2462 2462 * prior to insert.
2463 2463 */
2464 2464 static int
2465 2465 arc_evict_needed(arc_buf_contents_t type)
2466 2466 {
2467 2467 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2468 2468 return (1);
2469 2469
2470 2470 if (arc_reclaim_needed())
2471 2471 return (1);
2472 2472
2473 2473 return (arc_size > arc_c);
2474 2474 }
2475 2475
2476 2476 /*
2477 2477 * The buffer, supplied as the first argument, needs a data block.
2478 2478 * So, if we are at cache max, determine which cache should be victimized.
2479 2479 * We have the following cases:
2480 2480 *
2481 2481 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2482 2482 * In this situation if we're out of space, but the resident size of the MFU is
2483 2483 * under the limit, victimize the MFU cache to satisfy this insertion request.
2484 2484 *
2485 2485 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2486 2486 * Here, we've used up all of the available space for the MRU, so we need to
2487 2487 * evict from our own cache instead. Evict from the set of resident MRU
2488 2488 * entries.
2489 2489 *
2490 2490 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2491 2491 * c minus p represents the MFU space in the cache, since p is the size of the
2492 2492 * cache that is dedicated to the MRU. In this situation there's still space on
2493 2493 * the MFU side, so the MRU side needs to be victimized.
2494 2494 *
2495 2495 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2496 2496 * MFU's resident set is consuming more space than it has been allotted. In
2497 2497 * this situation, we must victimize our own cache, the MFU, for this insertion.
2498 2498 */
2499 2499 static void
2500 2500 arc_get_data_buf(arc_buf_t *buf)
2501 2501 {
2502 2502 arc_state_t *state = buf->b_hdr->b_state;
2503 2503 uint64_t size = buf->b_hdr->b_size;
2504 2504 arc_buf_contents_t type = buf->b_hdr->b_type;
2505 2505
2506 2506 arc_adapt(size, state);
2507 2507
2508 2508 /*
2509 2509 * We have not yet reached cache maximum size,
2510 2510 * just allocate a new buffer.
2511 2511 */
2512 2512 if (!arc_evict_needed(type)) {
2513 2513 if (type == ARC_BUFC_METADATA) {
2514 2514 buf->b_data = zio_buf_alloc(size);
2515 2515 arc_space_consume(size, ARC_SPACE_DATA);
2516 2516 } else {
2517 2517 ASSERT(type == ARC_BUFC_DATA);
2518 2518 buf->b_data = zio_data_buf_alloc(size);
2519 2519 ARCSTAT_INCR(arcstat_data_size, size);
2520 2520 atomic_add_64(&arc_size, size);
2521 2521 }
2522 2522 goto out;
2523 2523 }
2524 2524
2525 2525 /*
2526 2526 * If we are prefetching from the mfu ghost list, this buffer
2527 2527 * will end up on the mru list; so steal space from there.
2528 2528 */
2529 2529 if (state == arc_mfu_ghost)
2530 2530 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2531 2531 else if (state == arc_mru_ghost)
2532 2532 state = arc_mru;
2533 2533
2534 2534 if (state == arc_mru || state == arc_anon) {
2535 2535 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2536 2536 state = (arc_mfu->arcs_lsize[type] >= size &&
2537 2537 arc_p > mru_used) ? arc_mfu : arc_mru;
2538 2538 } else {
2539 2539 /* MFU cases */
2540 2540 uint64_t mfu_space = arc_c - arc_p;
2541 2541 state = (arc_mru->arcs_lsize[type] >= size &&
2542 2542 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2543 2543 }
2544 2544 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2545 2545 if (type == ARC_BUFC_METADATA) {
2546 2546 buf->b_data = zio_buf_alloc(size);
2547 2547 arc_space_consume(size, ARC_SPACE_DATA);
2548 2548 } else {
2549 2549 ASSERT(type == ARC_BUFC_DATA);
2550 2550 buf->b_data = zio_data_buf_alloc(size);
2551 2551 ARCSTAT_INCR(arcstat_data_size, size);
2552 2552 atomic_add_64(&arc_size, size);
2553 2553 }
2554 2554 ARCSTAT_BUMP(arcstat_recycle_miss);
2555 2555 }
2556 2556 ASSERT(buf->b_data != NULL);
2557 2557 out:
2558 2558 /*
2559 2559 * Update the state size. Note that ghost states have a
2560 2560 * "ghost size" and so don't need to be updated.
2561 2561 */
2562 2562 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2563 2563 arc_buf_hdr_t *hdr = buf->b_hdr;
2564 2564
2565 2565 atomic_add_64(&hdr->b_state->arcs_size, size);
2566 2566 if (list_link_active(&hdr->b_arc_node)) {
2567 2567 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2568 2568 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2569 2569 }
2570 2570 /*
2571 2571 * If we are growing the cache, and we are adding anonymous
2572 2572 * data, and we have outgrown arc_p, update arc_p
2573 2573 */
2574 2574 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2575 2575 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2576 2576 arc_p = MIN(arc_c, arc_p + size);
2577 2577 }
2578 2578 }
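
/*
 * A minimal sketch of the victim-state decision described in the comment
 * above arc_get_data_buf(), pulled out as a standalone helper purely for
 * illustration (the real function keeps this logic inline). The name
 * example_victim_state() is hypothetical.
 */
static arc_state_t *
example_victim_state(arc_state_t *state, uint64_t size,
    arc_buf_contents_t type)
{
	if (state == arc_mru || state == arc_anon) {
		/* cases 1 and 2: victimize the MFU only if the MRU has room */
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
		return ((arc_mfu->arcs_lsize[type] >= size &&
		    arc_p > mru_used) ? arc_mfu : arc_mru);
	}
	/* cases 3 and 4: victimize the MRU only if the MFU is within c - p */
	return ((arc_mru->arcs_lsize[type] >= size &&
	    arc_c - arc_p > arc_mfu->arcs_size) ? arc_mru : arc_mfu);
}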
2579 2579
2580 2580 /*
2581 2581 * This routine is called whenever a buffer is accessed.
2582 2582 * NOTE: the hash lock is dropped in this function.
2583 2583 */
2584 2584 static void
2585 2585 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2586 2586 {
2587 2587 clock_t now;
2588 2588
2589 2589 ASSERT(MUTEX_HELD(hash_lock));
2590 2590
2591 2591 if (buf->b_state == arc_anon) {
2592 2592 /*
2593 2593 * This buffer is not in the cache, and does not
2594 2594 * appear in our "ghost" list. Add the new buffer
2595 2595 * to the MRU state.
2596 2596 */
2597 2597
2598 2598 ASSERT(buf->b_arc_access == 0);
2599 2599 buf->b_arc_access = ddi_get_lbolt();
2600 2600 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2601 2601 arc_change_state(arc_mru, buf, hash_lock);
2602 2602
2603 2603 } else if (buf->b_state == arc_mru) {
2604 2604 now = ddi_get_lbolt();
2605 2605
2606 2606 /*
2607 2607 * If this buffer is here because of a prefetch, then either:
2608 2608 * - clear the flag if this is a "referencing" read
2609 2609 * (any subsequent access will bump this into the MFU state).
2610 2610 * or
2611 2611 * - move the buffer to the head of the list if this is
2612 2612 * another prefetch (to make it less likely to be evicted).
2613 2613 */
2614 2614 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2615 2615 if (refcount_count(&buf->b_refcnt) == 0) {
2616 2616 ASSERT(list_link_active(&buf->b_arc_node));
2617 2617 } else {
2618 2618 buf->b_flags &= ~ARC_PREFETCH;
2619 2619 ARCSTAT_BUMP(arcstat_mru_hits);
2620 2620 }
2621 2621 buf->b_arc_access = now;
2622 2622 return;
2623 2623 }
2624 2624
2625 2625 /*
2626 2626 * This buffer has been "accessed" only once so far,
2627 2627 * but it is still in the cache. Move it to the MFU
2628 2628 * state.
2629 2629 */
2630 2630 if (now > buf->b_arc_access + ARC_MINTIME) {
2631 2631 /*
2632 2632 * More than 125ms have passed since we
2633 2633 * instantiated this buffer. Move it to the
2634 2634 * most frequently used state.
2635 2635 */
2636 2636 buf->b_arc_access = now;
2637 2637 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2638 2638 arc_change_state(arc_mfu, buf, hash_lock);
2639 2639 }
2640 2640 ARCSTAT_BUMP(arcstat_mru_hits);
2641 2641 } else if (buf->b_state == arc_mru_ghost) {
2642 2642 arc_state_t *new_state;
2643 2643 /*
2644 2644 * This buffer has been "accessed" recently, but
2645 2645 * was evicted from the cache. Move it to the
2646 2646 * MFU state.
2647 2647 */
2648 2648
2649 2649 if (buf->b_flags & ARC_PREFETCH) {
2650 2650 new_state = arc_mru;
2651 2651 if (refcount_count(&buf->b_refcnt) > 0)
2652 2652 buf->b_flags &= ~ARC_PREFETCH;
2653 2653 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2654 2654 } else {
2655 2655 new_state = arc_mfu;
2656 2656 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2657 2657 }
2658 2658
2659 2659 buf->b_arc_access = ddi_get_lbolt();
2660 2660 arc_change_state(new_state, buf, hash_lock);
2661 2661
2662 2662 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2663 2663 } else if (buf->b_state == arc_mfu) {
2664 2664 /*
2665 2665 * This buffer has been accessed more than once and is
2666 2666 * still in the cache. Keep it in the MFU state.
2667 2667 *
2668 2668 * NOTE: an add_reference() that occurred when we did
2669 2669 * the arc_read() will have kicked this off the list.
2670 2670 * If it was a prefetch, we will explicitly move it to
2671 2671 * the head of the list now.
2672 2672 */
2673 2673 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2674 2674 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2675 2675 ASSERT(list_link_active(&buf->b_arc_node));
2676 2676 }
2677 2677 ARCSTAT_BUMP(arcstat_mfu_hits);
2678 2678 buf->b_arc_access = ddi_get_lbolt();
2679 2679 } else if (buf->b_state == arc_mfu_ghost) {
2680 2680 arc_state_t *new_state = arc_mfu;
2681 2681 /*
2682 2682 * This buffer has been accessed more than once but has
2683 2683 * been evicted from the cache. Move it back to the
2684 2684 * MFU state.
2685 2685 */
2686 2686
2687 2687 if (buf->b_flags & ARC_PREFETCH) {
2688 2688 /*
2689 2689 * This is a prefetch access...
2690 2690 * move this block back to the MRU state.
2691 2691 */
2692 2692 ASSERT0(refcount_count(&buf->b_refcnt));
2693 2693 new_state = arc_mru;
2694 2694 }
2695 2695
2696 2696 buf->b_arc_access = ddi_get_lbolt();
2697 2697 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2698 2698 arc_change_state(new_state, buf, hash_lock);
2699 2699
2700 2700 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2701 2701 } else if (buf->b_state == arc_l2c_only) {
2702 2702 /*
2703 2703 * This buffer is on the 2nd Level ARC.
2704 2704 */
2705 2705
2706 2706 buf->b_arc_access = ddi_get_lbolt();
2707 2707 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2708 2708 arc_change_state(arc_mfu, buf, hash_lock);
2709 2709 } else {
2710 2710 ASSERT(!"invalid arc state");
2711 2711 }
2712 2712 }
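
/*
 * A compact summary of the transitions implemented above (illustrative only;
 * the code carries additional prefetch and refcount subtleties):
 *
 *	current state		state after this access
 *	arc_anon		arc_mru
 *	arc_mru			arc_mfu after ARC_MINTIME, else stays arc_mru
 *	arc_mru_ghost		arc_mfu (arc_mru if it was a prefetch)
 *	arc_mfu			arc_mfu
 *	arc_mfu_ghost		arc_mfu (arc_mru if it was a prefetch)
 *	arc_l2c_only		arc_mfu
 */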
2713 2713
2714 2714 /* a generic arc_done_func_t which you can use */
2715 2715 /* ARGSUSED */
2716 2716 void
2717 2717 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2718 2718 {
2719 2719 if (zio == NULL || zio->io_error == 0)
2720 2720 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2721 2721 VERIFY(arc_buf_remove_ref(buf, arg));
2722 2722 }
2723 2723
2724 2724 /* a generic arc_done_func_t */
2725 2725 void
2726 2726 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2727 2727 {
2728 2728 arc_buf_t **bufp = arg;
2729 2729 if (zio && zio->io_error) {
2730 2730 VERIFY(arc_buf_remove_ref(buf, arg));
2731 2731 *bufp = NULL;
2732 2732 } else {
2733 2733 *bufp = buf;
2734 2734 ASSERT(buf->b_data);
2735 2735 }
2736 2736 }
2737 2737
2738 2738 static void
2739 2739 arc_read_done(zio_t *zio)
2740 2740 {
2741 2741 arc_buf_hdr_t *hdr, *found;
2742 2742 arc_buf_t *buf;
2743 2743 arc_buf_t *abuf; /* buffer we're assigning to callback */
2744 2744 kmutex_t *hash_lock;
2745 2745 arc_callback_t *callback_list, *acb;
2746 2746 int freeable = FALSE;
2747 2747
2748 2748 buf = zio->io_private;
2749 2749 hdr = buf->b_hdr;
2750 2750
2751 2751 /*
2752 2752 	 * The hdr was inserted into the hash table and removed from lists
2753 2753 * prior to starting I/O. We should find this header, since
2754 2754 * it's in the hash table, and it should be legit since it's
2755 2755 * not possible to evict it during the I/O. The only possible
2756 2756 * reason for it not to be found is if we were freed during the
2757 2757 * read.
2758 2758 */
2759 2759 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2760 2760 &hash_lock);
2761 2761
2762 2762 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2763 2763 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2764 2764 (found == hdr && HDR_L2_READING(hdr)));
2765 2765
2766 2766 hdr->b_flags &= ~ARC_L2_EVICTED;
2767 2767 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2768 2768 hdr->b_flags &= ~ARC_L2CACHE;
2769 2769
2770 2770 /* byteswap if necessary */
2771 2771 callback_list = hdr->b_acb;
2772 2772 ASSERT(callback_list != NULL);
2773 2773 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2774 2774 dmu_object_byteswap_t bswap =
2775 2775 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2776 2776 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2777 2777 byteswap_uint64_array :
2778 2778 dmu_ot_byteswap[bswap].ob_func;
2779 2779 func(buf->b_data, hdr->b_size);
2780 2780 }
2781 2781
2782 2782 arc_cksum_compute(buf, B_FALSE);
2783 2783 arc_buf_watch(buf);
2784 2784
2785 2785 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2786 2786 /*
2787 2787 * Only call arc_access on anonymous buffers. This is because
2788 2788 * if we've issued an I/O for an evicted buffer, we've already
2789 2789 * called arc_access (to prevent any simultaneous readers from
2790 2790 * getting confused).
2791 2791 */
2792 2792 arc_access(hdr, hash_lock);
2793 2793 }
2794 2794
2795 2795 /* create copies of the data buffer for the callers */
2796 2796 abuf = buf;
2797 2797 for (acb = callback_list; acb; acb = acb->acb_next) {
2798 2798 if (acb->acb_done) {
2799 2799 if (abuf == NULL) {
2800 2800 ARCSTAT_BUMP(arcstat_duplicate_reads);
2801 2801 abuf = arc_buf_clone(buf);
2802 2802 }
2803 2803 acb->acb_buf = abuf;
2804 2804 abuf = NULL;
2805 2805 }
2806 2806 }
2807 2807 hdr->b_acb = NULL;
2808 2808 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2809 2809 ASSERT(!HDR_BUF_AVAILABLE(hdr));
2810 2810 if (abuf == buf) {
2811 2811 ASSERT(buf->b_efunc == NULL);
2812 2812 ASSERT(hdr->b_datacnt == 1);
2813 2813 hdr->b_flags |= ARC_BUF_AVAILABLE;
2814 2814 }
2815 2815
2816 2816 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2817 2817
2818 2818 if (zio->io_error != 0) {
2819 2819 hdr->b_flags |= ARC_IO_ERROR;
2820 2820 if (hdr->b_state != arc_anon)
2821 2821 arc_change_state(arc_anon, hdr, hash_lock);
2822 2822 if (HDR_IN_HASH_TABLE(hdr))
2823 2823 buf_hash_remove(hdr);
2824 2824 freeable = refcount_is_zero(&hdr->b_refcnt);
2825 2825 }
2826 2826
2827 2827 /*
2828 2828 * Broadcast before we drop the hash_lock to avoid the possibility
2829 2829 * that the hdr (and hence the cv) might be freed before we get to
2830 2830 * the cv_broadcast().
2831 2831 */
2832 2832 cv_broadcast(&hdr->b_cv);
2833 2833
2834 2834 if (hash_lock) {
2835 2835 mutex_exit(hash_lock);
2836 2836 } else {
2837 2837 /*
2838 2838 * This block was freed while we waited for the read to
2839 2839 * complete. It has been removed from the hash table and
2840 2840 * moved to the anonymous state (so that it won't show up
2841 2841 * in the cache).
2842 2842 */
2843 2843 ASSERT3P(hdr->b_state, ==, arc_anon);
2844 2844 freeable = refcount_is_zero(&hdr->b_refcnt);
2845 2845 }
2846 2846
2847 2847 /* execute each callback and free its structure */
2848 2848 while ((acb = callback_list) != NULL) {
2849 2849 if (acb->acb_done)
2850 2850 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2851 2851
2852 2852 if (acb->acb_zio_dummy != NULL) {
2853 2853 acb->acb_zio_dummy->io_error = zio->io_error;
2854 2854 zio_nowait(acb->acb_zio_dummy);
2855 2855 }
2856 2856
2857 2857 callback_list = acb->acb_next;
2858 2858 kmem_free(acb, sizeof (arc_callback_t));
2859 2859 }
2860 2860
2861 2861 if (freeable)
2862 2862 arc_hdr_destroy(hdr);
2863 2863 }
2864 2864
2865 2865 /*
2866 2866 * "Read" the block at the specified DVA (in bp) via the
2867 2867 * cache. If the block is found in the cache, invoke the provided
2868 2868 * callback immediately and return. Note that the `zio' parameter
2869 2869 * in the callback will be NULL in this case, since no IO was
2870 2870  * required. If the block is not in the cache, pass the read request
2871 2871 * on to the spa with a substitute callback function, so that the
2872 2872 * requested block will be added to the cache.
2873 2873 *
2874 2874 * If a read request arrives for a block that has a read in-progress,
2875 2875 * either wait for the in-progress read to complete (and return the
2876 2876 * results); or, if this is a read with a "done" func, add a record
2877 2877 * to the read to invoke the "done" func when the read completes,
2878 2878 * and return; or just return.
2879 2879 *
2880 2880 * arc_read_done() will invoke all the requested "done" functions
2881 2881 * for readers of this block.
2882 2882 */
2883 2883 int
2884 2884 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
2885 2885 void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
2886 2886 const zbookmark_t *zb)
2887 2887 {
2888 2888 arc_buf_hdr_t *hdr;
2889 2889 arc_buf_t *buf = NULL;
2890 2890 kmutex_t *hash_lock;
2891 2891 zio_t *rzio;
2892 2892 uint64_t guid = spa_load_guid(spa);
2893 2893
2894 2894 top:
2895 2895 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2896 2896 &hash_lock);
2897 2897 if (hdr && hdr->b_datacnt > 0) {
2898 2898
2899 2899 *arc_flags |= ARC_CACHED;
2900 2900
2901 2901 if (HDR_IO_IN_PROGRESS(hdr)) {
2902 2902
2903 2903 if (*arc_flags & ARC_WAIT) {
2904 2904 cv_wait(&hdr->b_cv, hash_lock);
2905 2905 mutex_exit(hash_lock);
2906 2906 goto top;
2907 2907 }
2908 2908 ASSERT(*arc_flags & ARC_NOWAIT);
2909 2909
2910 2910 if (done) {
2911 2911 arc_callback_t *acb = NULL;
2912 2912
2913 2913 acb = kmem_zalloc(sizeof (arc_callback_t),
2914 2914 KM_SLEEP);
2915 2915 acb->acb_done = done;
2916 2916 acb->acb_private = private;
2917 2917 if (pio != NULL)
2918 2918 acb->acb_zio_dummy = zio_null(pio,
2919 2919 spa, NULL, NULL, NULL, zio_flags);
2920 2920
2921 2921 ASSERT(acb->acb_done != NULL);
2922 2922 acb->acb_next = hdr->b_acb;
2923 2923 hdr->b_acb = acb;
2924 2924 add_reference(hdr, hash_lock, private);
2925 2925 mutex_exit(hash_lock);
2926 2926 return (0);
2927 2927 }
2928 2928 mutex_exit(hash_lock);
2929 2929 return (0);
2930 2930 }
2931 2931
2932 2932 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2933 2933
2934 2934 if (done) {
2935 2935 add_reference(hdr, hash_lock, private);
2936 2936 /*
2937 2937 * If this block is already in use, create a new
2938 2938 * copy of the data so that we will be guaranteed
2939 2939 * that arc_release() will always succeed.
2940 2940 */
2941 2941 buf = hdr->b_buf;
2942 2942 ASSERT(buf);
2943 2943 ASSERT(buf->b_data);
2944 2944 if (HDR_BUF_AVAILABLE(hdr)) {
2945 2945 ASSERT(buf->b_efunc == NULL);
2946 2946 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2947 2947 } else {
2948 2948 buf = arc_buf_clone(buf);
2949 2949 }
2950 2950
2951 2951 } else if (*arc_flags & ARC_PREFETCH &&
2952 2952 refcount_count(&hdr->b_refcnt) == 0) {
2953 2953 hdr->b_flags |= ARC_PREFETCH;
2954 2954 }
2955 2955 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2956 2956 arc_access(hdr, hash_lock);
2957 2957 if (*arc_flags & ARC_L2CACHE)
2958 2958 hdr->b_flags |= ARC_L2CACHE;
2959 2959 if (*arc_flags & ARC_L2COMPRESS)
2960 2960 hdr->b_flags |= ARC_L2COMPRESS;
2961 2961 mutex_exit(hash_lock);
2962 2962 ARCSTAT_BUMP(arcstat_hits);
2963 2963 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2964 2964 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2965 2965 data, metadata, hits);
2966 2966
2967 2967 if (done)
2968 2968 done(NULL, buf, private);
2969 2969 } else {
2970 2970 uint64_t size = BP_GET_LSIZE(bp);
2971 2971 arc_callback_t *acb;
2972 2972 vdev_t *vd = NULL;
2973 2973 uint64_t addr = 0;
2974 2974 boolean_t devw = B_FALSE;
2975 2975
2976 2976 if (hdr == NULL) {
2977 2977 /* this block is not in the cache */
2978 2978 arc_buf_hdr_t *exists;
2979 2979 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2980 2980 buf = arc_buf_alloc(spa, size, private, type);
2981 2981 hdr = buf->b_hdr;
2982 2982 hdr->b_dva = *BP_IDENTITY(bp);
2983 2983 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2984 2984 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2985 2985 exists = buf_hash_insert(hdr, &hash_lock);
2986 2986 if (exists) {
2987 2987 /* somebody beat us to the hash insert */
2988 2988 mutex_exit(hash_lock);
2989 2989 buf_discard_identity(hdr);
2990 2990 (void) arc_buf_remove_ref(buf, private);
2991 2991 goto top; /* restart the IO request */
2992 2992 }
2993 2993 /* if this is a prefetch, we don't have a reference */
2994 2994 if (*arc_flags & ARC_PREFETCH) {
2995 2995 (void) remove_reference(hdr, hash_lock,
2996 2996 private);
2997 2997 hdr->b_flags |= ARC_PREFETCH;
2998 2998 }
2999 2999 if (*arc_flags & ARC_L2CACHE)
3000 3000 hdr->b_flags |= ARC_L2CACHE;
3001 3001 if (*arc_flags & ARC_L2COMPRESS)
3002 3002 hdr->b_flags |= ARC_L2COMPRESS;
3003 3003 if (BP_GET_LEVEL(bp) > 0)
3004 3004 hdr->b_flags |= ARC_INDIRECT;
3005 3005 } else {
3006 3006 /* this block is in the ghost cache */
3007 3007 ASSERT(GHOST_STATE(hdr->b_state));
3008 3008 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3009 3009 ASSERT0(refcount_count(&hdr->b_refcnt));
3010 3010 ASSERT(hdr->b_buf == NULL);
3011 3011
3012 3012 /* if this is a prefetch, we don't have a reference */
3013 3013 if (*arc_flags & ARC_PREFETCH)
3014 3014 hdr->b_flags |= ARC_PREFETCH;
3015 3015 else
3016 3016 add_reference(hdr, hash_lock, private);
3017 3017 if (*arc_flags & ARC_L2CACHE)
3018 3018 hdr->b_flags |= ARC_L2CACHE;
3019 3019 if (*arc_flags & ARC_L2COMPRESS)
3020 3020 hdr->b_flags |= ARC_L2COMPRESS;
3021 3021 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3022 3022 buf->b_hdr = hdr;
3023 3023 buf->b_data = NULL;
3024 3024 buf->b_efunc = NULL;
3025 3025 buf->b_private = NULL;
3026 3026 buf->b_next = NULL;
3027 3027 hdr->b_buf = buf;
3028 3028 ASSERT(hdr->b_datacnt == 0);
3029 3029 hdr->b_datacnt = 1;
3030 3030 arc_get_data_buf(buf);
3031 3031 arc_access(hdr, hash_lock);
3032 3032 }
3033 3033
3034 3034 ASSERT(!GHOST_STATE(hdr->b_state));
3035 3035
3036 3036 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3037 3037 acb->acb_done = done;
3038 3038 acb->acb_private = private;
3039 3039
3040 3040 ASSERT(hdr->b_acb == NULL);
3041 3041 hdr->b_acb = acb;
3042 3042 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3043 3043
3044 3044 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
3045 3045 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3046 3046 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3047 3047 addr = hdr->b_l2hdr->b_daddr;
3048 3048 /*
3049 3049 * Lock out device removal.
3050 3050 */
3051 3051 if (vdev_is_dead(vd) ||
3052 3052 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3053 3053 vd = NULL;
3054 3054 }
3055 3055
3056 3056 mutex_exit(hash_lock);
3057 3057
3058 3058 /*
3059 3059 * At this point, we have a level 1 cache miss. Try again in
3060 3060 * L2ARC if possible.
3061 3061 */
3062 3062 ASSERT3U(hdr->b_size, ==, size);
3063 3063 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3064 3064 uint64_t, size, zbookmark_t *, zb);
3065 3065 ARCSTAT_BUMP(arcstat_misses);
3066 3066 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3067 3067 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3068 3068 data, metadata, misses);
3069 3069
3070 3070 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3071 3071 /*
3072 3072 * Read from the L2ARC if the following are true:
3073 3073 * 1. The L2ARC vdev was previously cached.
3074 3074 * 2. This buffer still has L2ARC metadata.
3075 3075 * 3. This buffer isn't currently writing to the L2ARC.
3076 3076 * 4. The L2ARC entry wasn't evicted, which may
3077 3077 * also have invalidated the vdev.
3078 3078 			 * 5. This isn't a prefetch with l2arc_noprefetch set.
3079 3079 */
3080 3080 if (hdr->b_l2hdr != NULL &&
3081 3081 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3082 3082 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3083 3083 l2arc_read_callback_t *cb;
3084 3084
3085 3085 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3086 3086 ARCSTAT_BUMP(arcstat_l2_hits);
3087 3087
3088 3088 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3089 3089 KM_SLEEP);
3090 3090 cb->l2rcb_buf = buf;
3091 3091 cb->l2rcb_spa = spa;
3092 3092 cb->l2rcb_bp = *bp;
3093 3093 cb->l2rcb_zb = *zb;
3094 3094 cb->l2rcb_flags = zio_flags;
3095 3095 cb->l2rcb_compress = hdr->b_l2hdr->b_compress;
3096 3096
3097 3097 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3098 3098 addr + size < vd->vdev_psize -
3099 3099 VDEV_LABEL_END_SIZE);
3100 3100
3101 3101 /*
3102 3102 * l2arc read. The SCL_L2ARC lock will be
3103 3103 * released by l2arc_read_done().
3104 3104 * Issue a null zio if the underlying buffer
3105 3105 * was squashed to zero size by compression.
3106 3106 */
3107 3107 if (hdr->b_l2hdr->b_compress ==
3108 3108 ZIO_COMPRESS_EMPTY) {
3109 3109 rzio = zio_null(pio, spa, vd,
3110 3110 l2arc_read_done, cb,
3111 3111 zio_flags | ZIO_FLAG_DONT_CACHE |
3112 3112 ZIO_FLAG_CANFAIL |
3113 3113 ZIO_FLAG_DONT_PROPAGATE |
3114 3114 ZIO_FLAG_DONT_RETRY);
3115 3115 } else {
3116 3116 rzio = zio_read_phys(pio, vd, addr,
3117 3117 hdr->b_l2hdr->b_asize,
3118 3118 buf->b_data, ZIO_CHECKSUM_OFF,
3119 3119 l2arc_read_done, cb, priority,
3120 3120 zio_flags | ZIO_FLAG_DONT_CACHE |
3121 3121 ZIO_FLAG_CANFAIL |
3122 3122 ZIO_FLAG_DONT_PROPAGATE |
3123 3123 ZIO_FLAG_DONT_RETRY, B_FALSE);
3124 3124 }
3125 3125 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3126 3126 zio_t *, rzio);
3127 3127 ARCSTAT_INCR(arcstat_l2_read_bytes,
3128 3128 hdr->b_l2hdr->b_asize);
3129 3129
3130 3130 if (*arc_flags & ARC_NOWAIT) {
3131 3131 zio_nowait(rzio);
3132 3132 return (0);
3133 3133 }
3134 3134
3135 3135 ASSERT(*arc_flags & ARC_WAIT);
3136 3136 if (zio_wait(rzio) == 0)
3137 3137 return (0);
3138 3138
3139 3139 /* l2arc read error; goto zio_read() */
3140 3140 } else {
3141 3141 DTRACE_PROBE1(l2arc__miss,
3142 3142 arc_buf_hdr_t *, hdr);
3143 3143 ARCSTAT_BUMP(arcstat_l2_misses);
3144 3144 if (HDR_L2_WRITING(hdr))
3145 3145 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3146 3146 spa_config_exit(spa, SCL_L2ARC, vd);
3147 3147 }
3148 3148 } else {
3149 3149 if (vd != NULL)
3150 3150 spa_config_exit(spa, SCL_L2ARC, vd);
3151 3151 if (l2arc_ndev != 0) {
3152 3152 DTRACE_PROBE1(l2arc__miss,
3153 3153 arc_buf_hdr_t *, hdr);
3154 3154 ARCSTAT_BUMP(arcstat_l2_misses);
3155 3155 }
3156 3156 }
3157 3157
3158 3158 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3159 3159 arc_read_done, buf, priority, zio_flags, zb);
3160 3160
3161 3161 if (*arc_flags & ARC_WAIT)
3162 3162 return (zio_wait(rzio));
3163 3163
3164 3164 ASSERT(*arc_flags & ARC_NOWAIT);
3165 3165 zio_nowait(rzio);
3166 3166 }
3167 3167 return (0);
3168 3168 }
3169 3169
3170 3170 void
3171 3171 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3172 3172 {
3173 3173 ASSERT(buf->b_hdr != NULL);
3174 3174 ASSERT(buf->b_hdr->b_state != arc_anon);
3175 3175 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3176 3176 ASSERT(buf->b_efunc == NULL);
3177 3177 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3178 3178
3179 3179 buf->b_efunc = func;
3180 3180 buf->b_private = private;
3181 3181 }
3182 3182
3183 3183 /*
3184 3184 * Notify the arc that a block was freed, and thus will never be used again.
3185 3185 */
3186 3186 void
3187 3187 arc_freed(spa_t *spa, const blkptr_t *bp)
3188 3188 {
3189 3189 arc_buf_hdr_t *hdr;
3190 3190 kmutex_t *hash_lock;
3191 3191 uint64_t guid = spa_load_guid(spa);
3192 3192
3193 3193 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
3194 3194 &hash_lock);
3195 3195 if (hdr == NULL)
3196 3196 return;
3197 3197 if (HDR_BUF_AVAILABLE(hdr)) {
3198 3198 arc_buf_t *buf = hdr->b_buf;
3199 3199 add_reference(hdr, hash_lock, FTAG);
3200 3200 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3201 3201 mutex_exit(hash_lock);
3202 3202
3203 3203 arc_release(buf, FTAG);
3204 3204 (void) arc_buf_remove_ref(buf, FTAG);
3205 3205 } else {
3206 3206 mutex_exit(hash_lock);
3207 3207 }
3208 3208
3209 3209 }
3210 3210
3211 3211 /*
3212 3212 * This is used by the DMU to let the ARC know that a buffer is
3213 3213 * being evicted, so the ARC should clean up. If this arc buf
3214 3214 * is not yet in the evicted state, it will be put there.
3215 3215 */
3216 3216 int
3217 3217 arc_buf_evict(arc_buf_t *buf)
3218 3218 {
3219 3219 arc_buf_hdr_t *hdr;
3220 3220 kmutex_t *hash_lock;
3221 3221 arc_buf_t **bufp;
3222 3222
3223 3223 mutex_enter(&buf->b_evict_lock);
3224 3224 hdr = buf->b_hdr;
3225 3225 if (hdr == NULL) {
3226 3226 /*
3227 3227 * We are in arc_do_user_evicts().
3228 3228 */
3229 3229 ASSERT(buf->b_data == NULL);
3230 3230 mutex_exit(&buf->b_evict_lock);
3231 3231 return (0);
3232 3232 } else if (buf->b_data == NULL) {
3233 3233 arc_buf_t copy = *buf; /* structure assignment */
3234 3234 /*
3235 3235 * We are on the eviction list; process this buffer now
3236 3236 * but let arc_do_user_evicts() do the reaping.
3237 3237 */
3238 3238 buf->b_efunc = NULL;
3239 3239 mutex_exit(&buf->b_evict_lock);
3240 3240 VERIFY(copy.b_efunc(&copy) == 0);
3241 3241 return (1);
3242 3242 }
3243 3243 hash_lock = HDR_LOCK(hdr);
3244 3244 mutex_enter(hash_lock);
3245 3245 hdr = buf->b_hdr;
3246 3246 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3247 3247
3248 3248 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3249 3249 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3250 3250
3251 3251 /*
3252 3252 * Pull this buffer off of the hdr
3253 3253 */
3254 3254 bufp = &hdr->b_buf;
3255 3255 while (*bufp != buf)
3256 3256 bufp = &(*bufp)->b_next;
3257 3257 *bufp = buf->b_next;
3258 3258
3259 3259 ASSERT(buf->b_data != NULL);
3260 3260 arc_buf_destroy(buf, FALSE, FALSE);
3261 3261
3262 3262 if (hdr->b_datacnt == 0) {
3263 3263 arc_state_t *old_state = hdr->b_state;
3264 3264 arc_state_t *evicted_state;
3265 3265
3266 3266 ASSERT(hdr->b_buf == NULL);
3267 3267 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3268 3268
3269 3269 evicted_state =
3270 3270 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3271 3271
3272 3272 mutex_enter(&old_state->arcs_mtx);
3273 3273 mutex_enter(&evicted_state->arcs_mtx);
3274 3274
3275 3275 arc_change_state(evicted_state, hdr, hash_lock);
3276 3276 ASSERT(HDR_IN_HASH_TABLE(hdr));
3277 3277 hdr->b_flags |= ARC_IN_HASH_TABLE;
3278 3278 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3279 3279
3280 3280 mutex_exit(&evicted_state->arcs_mtx);
3281 3281 mutex_exit(&old_state->arcs_mtx);
3282 3282 }
3283 3283 mutex_exit(hash_lock);
3284 3284 mutex_exit(&buf->b_evict_lock);
3285 3285
3286 3286 VERIFY(buf->b_efunc(buf) == 0);
3287 3287 buf->b_efunc = NULL;
3288 3288 buf->b_private = NULL;
3289 3289 buf->b_hdr = NULL;
3290 3290 buf->b_next = NULL;
3291 3291 kmem_cache_free(buf_cache, buf);
3292 3292 return (1);
3293 3293 }
3294 3294
3295 3295 /*
3296 3296 * Release this buffer from the cache, making it an anonymous buffer. This
3297 3297 * must be done after a read and prior to modifying the buffer contents.
3298 3298 * If the buffer has more than one reference, we must make
3299 3299 * a new hdr for the buffer.
3300 3300 */
3301 3301 void
3302 3302 arc_release(arc_buf_t *buf, void *tag)
3303 3303 {
3304 3304 arc_buf_hdr_t *hdr;
3305 3305 kmutex_t *hash_lock = NULL;
3306 3306 l2arc_buf_hdr_t *l2hdr;
3307 3307 uint64_t buf_size;
3308 3308
3309 3309 /*
3310 3310 * It would be nice to assert that if it's DMU metadata (level >
3311 3311 * 0 || it's the dnode file), then it must be syncing context.
3312 3312 * But we don't know that information at this level.
3313 3313 */
3314 3314
3315 3315 mutex_enter(&buf->b_evict_lock);
3316 3316 hdr = buf->b_hdr;
3317 3317
3318 3318 /* this buffer is not on any list */
3319 3319 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3320 3320
3321 3321 if (hdr->b_state == arc_anon) {
3322 3322 /* this buffer is already released */
3323 3323 ASSERT(buf->b_efunc == NULL);
3324 3324 } else {
3325 3325 hash_lock = HDR_LOCK(hdr);
3326 3326 mutex_enter(hash_lock);
3327 3327 hdr = buf->b_hdr;
3328 3328 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3329 3329 }
3330 3330
3331 3331 l2hdr = hdr->b_l2hdr;
3332 3332 if (l2hdr) {
3333 3333 mutex_enter(&l2arc_buflist_mtx);
3334 3334 hdr->b_l2hdr = NULL;
3335 3335 }
3336 3336 buf_size = hdr->b_size;
3337 3337
3338 3338 /*
3339 3339 * Do we have more than one buf?
3340 3340 */
3341 3341 if (hdr->b_datacnt > 1) {
3342 3342 arc_buf_hdr_t *nhdr;
3343 3343 arc_buf_t **bufp;
3344 3344 uint64_t blksz = hdr->b_size;
3345 3345 uint64_t spa = hdr->b_spa;
3346 3346 arc_buf_contents_t type = hdr->b_type;
3347 3347 uint32_t flags = hdr->b_flags;
3348 3348
3349 3349 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3350 3350 /*
3351 3351 * Pull the data off of this hdr and attach it to
3352 3352 * a new anonymous hdr.
3353 3353 */
3354 3354 (void) remove_reference(hdr, hash_lock, tag);
3355 3355 bufp = &hdr->b_buf;
3356 3356 while (*bufp != buf)
3357 3357 bufp = &(*bufp)->b_next;
3358 3358 *bufp = buf->b_next;
3359 3359 buf->b_next = NULL;
3360 3360
3361 3361 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3362 3362 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3363 3363 if (refcount_is_zero(&hdr->b_refcnt)) {
3364 3364 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3365 3365 ASSERT3U(*size, >=, hdr->b_size);
3366 3366 atomic_add_64(size, -hdr->b_size);
3367 3367 }
3368 3368
3369 3369 /*
3370 3370 * We're releasing a duplicate user data buffer, update
3371 3371 * our statistics accordingly.
3372 3372 */
3373 3373 if (hdr->b_type == ARC_BUFC_DATA) {
3374 3374 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3375 3375 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3376 3376 -hdr->b_size);
3377 3377 }
3378 3378 hdr->b_datacnt -= 1;
3379 3379 arc_cksum_verify(buf);
3380 3380 arc_buf_unwatch(buf);
3381 3381
3382 3382 mutex_exit(hash_lock);
3383 3383
3384 3384 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3385 3385 nhdr->b_size = blksz;
3386 3386 nhdr->b_spa = spa;
3387 3387 nhdr->b_type = type;
3388 3388 nhdr->b_buf = buf;
3389 3389 nhdr->b_state = arc_anon;
3390 3390 nhdr->b_arc_access = 0;
3391 3391 nhdr->b_flags = flags & ARC_L2_WRITING;
3392 3392 nhdr->b_l2hdr = NULL;
3393 3393 nhdr->b_datacnt = 1;
3394 3394 nhdr->b_freeze_cksum = NULL;
3395 3395 (void) refcount_add(&nhdr->b_refcnt, tag);
3396 3396 buf->b_hdr = nhdr;
3397 3397 mutex_exit(&buf->b_evict_lock);
3398 3398 atomic_add_64(&arc_anon->arcs_size, blksz);
3399 3399 } else {
3400 3400 mutex_exit(&buf->b_evict_lock);
3401 3401 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3402 3402 ASSERT(!list_link_active(&hdr->b_arc_node));
3403 3403 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3404 3404 if (hdr->b_state != arc_anon)
3405 3405 arc_change_state(arc_anon, hdr, hash_lock);
3406 3406 hdr->b_arc_access = 0;
3407 3407 if (hash_lock)
3408 3408 mutex_exit(hash_lock);
3409 3409
3410 3410 buf_discard_identity(hdr);
3411 3411 arc_buf_thaw(buf);
3412 3412 }
3413 3413 buf->b_efunc = NULL;
3414 3414 buf->b_private = NULL;
3415 3415
3416 3416 if (l2hdr) {
3417 3417 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3418 3418 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3419 3419 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3420 3420 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3421 3421 mutex_exit(&l2arc_buflist_mtx);
3422 3422 }
3423 3423 }
3424 3424
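/*
 * Illustrative caller-side sketch (not part of this change) of the
 * contract described above: a cached buffer must be released, i.e. made
 * anonymous, before its contents are modified.  The helper name and tag
 * are hypothetical.
 */
static void
example_modify_cached_buf(arc_buf_t *buf, void *tag)
{
	arc_release(buf, tag);		/* detach from the hash table, thaw */
	ASSERT(arc_released(buf));	/* the buffer is now anonymous */
	/* ... it is now safe to modify buf->b_data ... */
}
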
3425 3425 int
3426 3426 arc_released(arc_buf_t *buf)
3427 3427 {
3428 3428 int released;
3429 3429
3430 3430 mutex_enter(&buf->b_evict_lock);
3431 3431 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3432 3432 mutex_exit(&buf->b_evict_lock);
3433 3433 return (released);
3434 3434 }
3435 3435
3436 3436 int
3437 3437 arc_has_callback(arc_buf_t *buf)
3438 3438 {
3439 3439 int callback;
3440 3440
3441 3441 mutex_enter(&buf->b_evict_lock);
3442 3442 callback = (buf->b_efunc != NULL);
3443 3443 mutex_exit(&buf->b_evict_lock);
3444 3444 return (callback);
3445 3445 }
3446 3446
3447 3447 #ifdef ZFS_DEBUG
3448 3448 int
3449 3449 arc_referenced(arc_buf_t *buf)
3450 3450 {
3451 3451 int referenced;
3452 3452
3453 3453 mutex_enter(&buf->b_evict_lock);
3454 3454 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3455 3455 mutex_exit(&buf->b_evict_lock);
3456 3456 return (referenced);
3457 3457 }
3458 3458 #endif
3459 3459
3460 3460 static void
3461 3461 arc_write_ready(zio_t *zio)
3462 3462 {
3463 3463 arc_write_callback_t *callback = zio->io_private;
3464 3464 arc_buf_t *buf = callback->awcb_buf;
3465 3465 arc_buf_hdr_t *hdr = buf->b_hdr;
3466 3466
3467 3467 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3468 3468 callback->awcb_ready(zio, buf, callback->awcb_private);
3469 3469
3470 3470 /*
3471 3471 * If the IO is already in progress, then this is a re-write
3472 3472 * attempt, so we need to thaw and re-compute the cksum.
3473 3473 * It is the responsibility of the callback to handle the
3474 3474 * accounting for any re-write attempt.
3475 3475 */
3476 3476 if (HDR_IO_IN_PROGRESS(hdr)) {
3477 3477 mutex_enter(&hdr->b_freeze_lock);
3478 3478 if (hdr->b_freeze_cksum != NULL) {
3479 3479 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3480 3480 hdr->b_freeze_cksum = NULL;
3481 3481 }
3482 3482 mutex_exit(&hdr->b_freeze_lock);
3483 3483 }
3484 3484 arc_cksum_compute(buf, B_FALSE);
3485 3485 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3486 3486 }
3487 3487
3488 3488 /*
3489 3489 * The SPA calls this callback for each physical write that happens on behalf
3490 3490 * of a logical write. See the comment in dbuf_write_physdone() for details.
3491 3491 */
3492 3492 static void
3493 3493 arc_write_physdone(zio_t *zio)
3494 3494 {
3495 3495 arc_write_callback_t *cb = zio->io_private;
3496 3496 if (cb->awcb_physdone != NULL)
3497 3497 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
3498 3498 }
3499 3499
3500 3500 static void
3501 3501 arc_write_done(zio_t *zio)
3502 3502 {
3503 3503 arc_write_callback_t *callback = zio->io_private;
3504 3504 arc_buf_t *buf = callback->awcb_buf;
3505 3505 arc_buf_hdr_t *hdr = buf->b_hdr;
3506 3506
3507 3507 ASSERT(hdr->b_acb == NULL);
3508 3508
3509 3509 if (zio->io_error == 0) {
3510 3510 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3511 3511 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3512 3512 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3513 3513 } else {
3514 3514 ASSERT(BUF_EMPTY(hdr));
3515 3515 }
3516 3516
3517 3517 /*
3518 3518 * If the block to be written was all-zero, we may have
3519 3519 * compressed it away. In this case no write was performed
3520 3520 * so there will be no dva/birth/checksum. The buffer must
3521 3521 * therefore remain anonymous (and uncached).
3522 3522 */
3523 3523 if (!BUF_EMPTY(hdr)) {
3524 3524 arc_buf_hdr_t *exists;
3525 3525 kmutex_t *hash_lock;
3526 3526
3527 3527 ASSERT(zio->io_error == 0);
3528 3528
3529 3529 arc_cksum_verify(buf);
3530 3530
3531 3531 exists = buf_hash_insert(hdr, &hash_lock);
3532 3532 if (exists) {
3533 3533 /*
3534 3534 * This can only happen if we overwrite for
3535 3535 * sync-to-convergence, because we remove
3536 3536 * buffers from the hash table when we arc_free().
3537 3537 */
3538 3538 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3539 3539 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3540 3540 panic("bad overwrite, hdr=%p exists=%p",
3541 3541 (void *)hdr, (void *)exists);
3542 3542 ASSERT(refcount_is_zero(&exists->b_refcnt));
3543 3543 arc_change_state(arc_anon, exists, hash_lock);
3544 3544 mutex_exit(hash_lock);
3545 3545 arc_hdr_destroy(exists);
3546 3546 exists = buf_hash_insert(hdr, &hash_lock);
3547 3547 ASSERT3P(exists, ==, NULL);
3548 3548 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3549 3549 /* nopwrite */
3550 3550 ASSERT(zio->io_prop.zp_nopwrite);
3551 3551 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3552 3552 panic("bad nopwrite, hdr=%p exists=%p",
3553 3553 (void *)hdr, (void *)exists);
3554 3554 } else {
3555 3555 /* Dedup */
3556 3556 ASSERT(hdr->b_datacnt == 1);
3557 3557 ASSERT(hdr->b_state == arc_anon);
3558 3558 ASSERT(BP_GET_DEDUP(zio->io_bp));
3559 3559 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3560 3560 }
3561 3561 }
3562 3562 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3563 3563 /* if it's not anon, we are doing a scrub */
3564 3564 if (!exists && hdr->b_state == arc_anon)
3565 3565 arc_access(hdr, hash_lock);
3566 3566 mutex_exit(hash_lock);
3567 3567 } else {
3568 3568 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3569 3569 }
3570 3570
3571 3571 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3572 3572 callback->awcb_done(zio, buf, callback->awcb_private);
3573 3573
3574 3574 kmem_free(callback, sizeof (arc_write_callback_t));
3575 3575 }
3576 3576
3577 3577 zio_t *
3578 3578 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3579 3579 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3580 3580 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
3581 3581 arc_done_func_t *done, void *private, zio_priority_t priority,
3582 3582 int zio_flags, const zbookmark_t *zb)
3583 3583 {
3584 3584 arc_buf_hdr_t *hdr = buf->b_hdr;
3585 3585 arc_write_callback_t *callback;
3586 3586 zio_t *zio;
3587 3587
3588 3588 ASSERT(ready != NULL);
3589 3589 ASSERT(done != NULL);
3590 3590 ASSERT(!HDR_IO_ERROR(hdr));
3591 3591 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3592 3592 ASSERT(hdr->b_acb == NULL);
3593 3593 if (l2arc)
3594 3594 hdr->b_flags |= ARC_L2CACHE;
3595 3595 if (l2arc_compress)
3596 3596 hdr->b_flags |= ARC_L2COMPRESS;
3597 3597 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3598 3598 callback->awcb_ready = ready;
3599 3599 callback->awcb_physdone = physdone;
3600 3600 callback->awcb_done = done;
3601 3601 callback->awcb_private = private;
3602 3602 callback->awcb_buf = buf;
3603 3603
3604 3604 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3605 3605 arc_write_ready, arc_write_physdone, arc_write_done, callback,
3606 3606 priority, zio_flags, zb);
3607 3607
3608 3608 return (zio);
3609 3609 }
3610 3610
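/*
 * Illustrative only: the shape of the ready/done callbacks handed to
 * arc_write() above.  The name is hypothetical; the argument list mirrors
 * how awcb_ready()/awcb_done() are invoked from arc_write_ready() and
 * arc_write_done().
 */
/* ARGSUSED */
static void
example_write_done(zio_t *zio, arc_buf_t *buf, void *private)
{
	if (zio->io_error != 0)
		return;
	/* on success, zio->io_bp identifies where the buffer was written */
}
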
3611 3611 static int
3612 3612 arc_memory_throttle(uint64_t reserve, uint64_t txg)
3613 3613 {
3614 3614 #ifdef _KERNEL
3615 3615 uint64_t available_memory = ptob(freemem);
3616 3616 static uint64_t page_load = 0;
3617 3617 static uint64_t last_txg = 0;
3618 3618
3619 3619 #if defined(__i386)
3620 3620 available_memory =
3621 3621 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3622 3622 #endif
3623 3623
3624 3624 if (freemem > physmem * arc_lotsfree_percent / 100)
3625 3625 return (0);
3626 3626
3627 3627 if (txg > last_txg) {
3628 3628 last_txg = txg;
3629 3629 page_load = 0;
3630 3630 }
3631 3631 /*
3632 3632 * If we are in pageout, we know that memory is already tight
3633 3633 * and the ARC is already evicting, so we just want to let
3634 3634 * page writes continue to occur as quickly as possible.
3635 3635 */
3636 3636 if (curproc == proc_pageout) {
3637 3637 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3638 3638 return (SET_ERROR(ERESTART));
3639 3639 /* Note: reserve is inflated, so we deflate */
3640 3640 page_load += reserve / 8;
3641 3641 return (0);
3642 3642 } else if (page_load > 0 && arc_reclaim_needed()) {
3643 3643 /* memory is low, delay before restarting */
3644 3644 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3645 3645 return (SET_ERROR(EAGAIN));
3646 3646 }
3647 3647 page_load = 0;
3648 3648 #endif
3649 3649 return (0);
3650 3650 }
3651 3651
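/*
 * Hypothetical sketch of the pageout-path arithmetic above: ERESTART is
 * returned once the accumulated (deflated) page_load exceeds a quarter of
 * MAX(ptob(minfree), available_memory).  Plain uint64_t arguments stand
 * in for the kernel globals; the helper name is illustrative only.
 */
static int
example_pageout_throttle(uint64_t minfree_bytes, uint64_t available_memory,
    uint64_t *page_load, uint64_t reserve)
{
	uint64_t limit = MAX(minfree_bytes, available_memory) / 4;

	if (*page_load > limit)
		return (SET_ERROR(ERESTART));
	*page_load += reserve / 8;	/* reserve is inflated, so deflate */
	return (0);
}
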
3652 3652 void
3653 3653 arc_tempreserve_clear(uint64_t reserve)
3654 3654 {
3655 3655 atomic_add_64(&arc_tempreserve, -reserve);
3656 3656 ASSERT((int64_t)arc_tempreserve >= 0);
3657 3657 }
3658 3658
3659 3659 int
3660 3660 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3661 3661 {
3662 3662 int error;
3663 3663 uint64_t anon_size;
3664 3664
3665 3665 if (reserve > arc_c/4 && !arc_no_grow)
3666 3666 arc_c = MIN(arc_c_max, reserve * 4);
3667 3667 if (reserve > arc_c)
3668 3668 return (SET_ERROR(ENOMEM));
3669 3669
3670 3670 /*
3671 3671 * Don't count loaned bufs as in flight dirty data to prevent long
3672 3672 * network delays from blocking transactions that are ready to be
3673 3673 * assigned to a txg.
3674 3674 */
3675 3675 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3676 3676
3677 3677 /*
3678 3678 * Writes will, almost always, require additional memory allocations
3679 3679 * in order to compress/encrypt/etc the data. We therefore need to
3680 3680 * make sure that there is sufficient available memory for this.
3681 3681 */
3682 3682 error = arc_memory_throttle(reserve, txg);
3683 3683 if (error != 0)
3684 3684 return (error);
3685 3685
3686 3686 /*
3687 3687 * Throttle writes when the amount of dirty data in the cache
3688 3688 * gets too large. We try to keep the cache less than half full
3689 3689 * of dirty blocks so that our sync times don't grow too large.
3690 3690 * Note: if two requests come in concurrently, we might let them
3691 3691 * both succeed, when one of them should fail. Not a huge deal.
3692 3692 */
3693 3693
3694 3694 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3695 3695 anon_size > arc_c / 4) {
3696 3696 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3697 3697 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3698 3698 arc_tempreserve>>10,
3699 3699 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3700 3700 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3701 3701 reserve>>10, arc_c>>10);
3702 3702 return (SET_ERROR(ERESTART));
3703 3703 }
3704 3704 atomic_add_64(&arc_tempreserve, reserve);
3705 3705 return (0);
3706 3706 }
3707 3707
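/*
 * Hedged worked example of the dirty-data throttle above, with
 * hypothetical sizes: for arc_c = 1 GiB, a reservation is refused with
 * ERESTART only when reserve + arc_tempreserve + anon_size exceeds
 * 512 MiB *and* anon_size alone exceeds 256 MiB.  The helper is
 * illustrative only.
 */
static boolean_t
example_would_throttle(uint64_t reserve, uint64_t tempreserve,
    uint64_t anon_size, uint64_t c)
{
	if (reserve + tempreserve + anon_size > c / 2 && anon_size > c / 4)
		return (B_TRUE);
	return (B_FALSE);
}
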
3708 3708 void
3709 3709 arc_init(void)
3710 3710 {
3711 3711 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3712 3712 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3713 3713
3714 3714 /* Convert seconds to clock ticks */
3715 3715 arc_min_prefetch_lifespan = 1 * hz;
3716 3716
3717 3717 /* Start out with 1/8 of all memory */
3718 3718 arc_c = physmem * PAGESIZE / 8;
3719 3719
3720 3720 #ifdef _KERNEL
3721 3721 /*
3722 3722 * On architectures where the physical memory can be larger
3723 3723 * than the addressable space (intel in 32-bit mode), we may
3724 3724 * need to limit the cache to 1/8 of VM size.
3725 3725 */
3726 3726 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3727 3727 #endif
3728 3728
3729 3729 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3730 3730 arc_c_min = MAX(arc_c / 4, 64<<20);
3731 3731 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3732 3732 if (arc_c * 8 >= 1<<30)
3733 3733 arc_c_max = (arc_c * 8) - (1<<30);
3734 3734 else
3735 3735 arc_c_max = arc_c_min;
3736 3736 arc_c_max = MAX(arc_c * 6, arc_c_max);
3737 3737
3738 3738 /*
3739 3739 * Allow the tunables to override our calculations if they are
3740 3740 * reasonable (i.e. over 64MB)
3741 3741 */
3742 3742 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3743 3743 arc_c_max = zfs_arc_max;
3744 3744 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3745 3745 arc_c_min = zfs_arc_min;
3746 3746
3747 3747 arc_c = arc_c_max;
3748 3748 arc_p = (arc_c >> 1);
3749 3749
3750 3750 /* limit meta-data to 1/4 of the arc capacity */
3751 3751 arc_meta_limit = arc_c_max / 4;
3752 3752
3753 3753 /* Allow the tunable to override if it is reasonable */
3754 3754 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3755 3755 arc_meta_limit = zfs_arc_meta_limit;
3756 3756
3757 3757 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3758 3758 arc_c_min = arc_meta_limit / 2;
3759 3759
3760 3760 if (zfs_arc_grow_retry > 0)
3761 3761 arc_grow_retry = zfs_arc_grow_retry;
3762 3762
3763 3763 if (zfs_arc_shrink_shift > 0)
3764 3764 arc_shrink_shift = zfs_arc_shrink_shift;
3765 3765
3766 3766 if (zfs_arc_p_min_shift > 0)
3767 3767 arc_p_min_shift = zfs_arc_p_min_shift;
3768 3768
3769 3769 /* if kmem_flags are set, let's try to use less memory */
3770 3770 if (kmem_debugging())
3771 3771 arc_c = arc_c / 2;
3772 3772 if (arc_c < arc_c_min)
3773 3773 arc_c = arc_c_min;
3774 3774
3775 3775 arc_anon = &ARC_anon;
3776 3776 arc_mru = &ARC_mru;
3777 3777 arc_mru_ghost = &ARC_mru_ghost;
3778 3778 arc_mfu = &ARC_mfu;
3779 3779 arc_mfu_ghost = &ARC_mfu_ghost;
3780 3780 arc_l2c_only = &ARC_l2c_only;
3781 3781 arc_size = 0;
3782 3782
3783 3783 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3784 3784 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3785 3785 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3786 3786 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3787 3787 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3788 3788 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3789 3789
3790 3790 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3791 3791 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3792 3792 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3793 3793 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3794 3794 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3795 3795 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3796 3796 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3797 3797 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3798 3798 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3799 3799 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3800 3800 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3801 3801 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3802 3802 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3803 3803 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3804 3804 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3805 3805 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3806 3806 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3807 3807 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3808 3808 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3809 3809 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3810 3810
3811 3811 buf_init();
3812 3812
3813 3813 arc_thread_exit = 0;
3814 3814 arc_eviction_list = NULL;
3815 3815 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3816 3816 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3817 3817
3818 3818 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3819 3819 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3820 3820
3821 3821 if (arc_ksp != NULL) {
3822 3822 arc_ksp->ks_data = &arc_stats;
3823 3823 kstat_install(arc_ksp);
3824 3824 }
3825 3825
3826 3826 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3827 3827 TS_RUN, minclsyspri);
3828 3828
3829 3829 arc_dead = FALSE;
3830 3830 arc_warm = B_FALSE;
3831 3831
3832 3832 /*
3833 3833 * Calculate maximum amount of dirty data per pool.
3834 3834 *
3835 3835 * If it has been set by /etc/system, take that.
3836 3836 * Otherwise, use a percentage of physical memory defined by
3837 3837 * zfs_dirty_data_max_percent (default 10%) with a cap at
3838 3838 * zfs_dirty_data_max_max (default 4GB).
3839 3839 */
3840 3840 if (zfs_dirty_data_max == 0) {
3841 3841 zfs_dirty_data_max = physmem * PAGESIZE *
3842 3842 zfs_dirty_data_max_percent / 100;
3843 3843 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
3844 3844 zfs_dirty_data_max_max);
3845 3845 }
3846 3846 }
3847 3847
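/*
 * Illustrative arithmetic for the default sizing above, assuming a
 * hypothetical 16 GiB machine with no tunables set: arc_c starts at
 * 2 GiB (1/8 of memory), arc_c_min = 512 MiB, arc_c_max =
 * MAX(6 * arc_c, physmem - 1 GiB) = 15 GiB, then arc_c = arc_c_max,
 * arc_p = arc_c / 2 and arc_meta_limit = arc_c_max / 4.  The helper
 * below mirrors only the arc_c_max computation.
 */
static uint64_t
example_default_arc_max(uint64_t physbytes)
{
	uint64_t c = physbytes / 8;
	uint64_t cmin = MAX(c / 4, 64ULL << 20);
	uint64_t cmax;

	cmax = (c * 8 >= (1ULL << 30)) ? (c * 8) - (1ULL << 30) : cmin;
	return (MAX(c * 6, cmax));
}
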
3848 3848 void
3849 3849 arc_fini(void)
3850 3850 {
3851 3851 mutex_enter(&arc_reclaim_thr_lock);
3852 3852 arc_thread_exit = 1;
3853 3853 while (arc_thread_exit != 0)
3854 3854 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3855 3855 mutex_exit(&arc_reclaim_thr_lock);
3856 3856
3857 3857 arc_flush(NULL);
3858 3858
3859 3859 arc_dead = TRUE;
3860 3860
3861 3861 if (arc_ksp != NULL) {
3862 3862 kstat_delete(arc_ksp);
3863 3863 arc_ksp = NULL;
3864 3864 }
3865 3865
3866 3866 mutex_destroy(&arc_eviction_mtx);
3867 3867 mutex_destroy(&arc_reclaim_thr_lock);
3868 3868 cv_destroy(&arc_reclaim_thr_cv);
3869 3869
3870 3870 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3871 3871 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3872 3872 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3873 3873 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3874 3874 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3875 3875 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3876 3876 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3877 3877 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3878 3878
3879 3879 mutex_destroy(&arc_anon->arcs_mtx);
3880 3880 mutex_destroy(&arc_mru->arcs_mtx);
3881 3881 mutex_destroy(&arc_mru_ghost->arcs_mtx);
3882 3882 mutex_destroy(&arc_mfu->arcs_mtx);
3883 3883 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3884 3884 mutex_destroy(&arc_l2c_only->arcs_mtx);
3885 3885
3886 3886 buf_fini();
3887 3887
3888 3888 ASSERT(arc_loaned_bytes == 0);
3889 3889 }
3890 3890
3891 3891 /*
3892 3892 * Level 2 ARC
3893 3893 *
3894 3894 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3895 3895 * It uses dedicated storage devices to hold cached data, which are populated
3896 3896 * using large infrequent writes. The main role of this cache is to boost
3897 3897 * the performance of random read workloads. The intended L2ARC devices
3898 3898 * include short-stroked disks, solid state disks, and other media with
3899 3899 * substantially faster read latency than disk.
3900 3900 *
3901 3901 * +-----------------------+
3902 3902 * | ARC |
3903 3903 * +-----------------------+
3904 3904 * | ^ ^
3905 3905 * | | |
3906 3906 * l2arc_feed_thread() arc_read()
3907 3907 * | | |
3908 3908 * | l2arc read |
3909 3909 * V | |
3910 3910 * +---------------+ |
3911 3911 * | L2ARC | |
3912 3912 * +---------------+ |
3913 3913 * | ^ |
3914 3914 * l2arc_write() | |
3915 3915 * | | |
3916 3916 * V | |
3917 3917 * +-------+ +-------+
3918 3918 * | vdev | | vdev |
3919 3919 * | cache | | cache |
3920 3920 * +-------+ +-------+
3921 3921 * +=========+ .-----.
3922 3922 * : L2ARC : |-_____-|
3923 3923 * : devices : | Disks |
3924 3924 * +=========+ `-_____-'
3925 3925 *
3926 3926 * Read requests are satisfied from the following sources, in order:
3927 3927 *
3928 3928 * 1) ARC
3929 3929 * 2) vdev cache of L2ARC devices
3930 3930 * 3) L2ARC devices
3931 3931 * 4) vdev cache of disks
3932 3932 * 5) disks
3933 3933 *
3934 3934 * Some L2ARC device types exhibit extremely slow write performance.
3935 3935 * To accommodate this there are some significant differences between
3936 3936 * the L2ARC and traditional cache design:
3937 3937 *
3938 3938 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3939 3939 * the ARC behave as usual, freeing buffers and placing headers on ghost
3940 3940 * lists. The ARC does not send buffers to the L2ARC during eviction as
3941 3941 * this would add inflated write latencies for all ARC memory pressure.
3942 3942 *
3943 3943 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3944 3944 * It does this by periodically scanning buffers from the eviction-end of
3945 3945 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3946 3946 * not already there. It scans until a headroom of buffers is satisfied,
3947 3947 * which itself is a buffer for ARC eviction. If a compressible buffer is
3948 3948 * found during scanning and selected for writing to an L2ARC device, we
3949 3949 * temporarily boost scanning headroom during the next scan cycle to make
3950 3950 * sure we adapt to compression effects (which might significantly reduce
3951 3951 * the data volume we write to L2ARC). The thread that does this is
3952 3952 * l2arc_feed_thread(), illustrated below; example sizes are included to
3953 3953 * provide a better sense of ratio than this diagram:
3954 3954 *
3955 3955 * head --> tail
3956 3956 * +---------------------+----------+
3957 3957 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3958 3958 * +---------------------+----------+ | o L2ARC eligible
3959 3959 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3960 3960 * +---------------------+----------+ |
3961 3961 * 15.9 Gbytes ^ 32 Mbytes |
3962 3962 * headroom |
3963 3963 * l2arc_feed_thread()
3964 3964 * |
3965 3965 * l2arc write hand <--[oooo]--'
3966 3966 * | 8 Mbyte
3967 3967 * | write max
3968 3968 * V
3969 3969 * +==============================+
3970 3970 * L2ARC dev |####|#|###|###| |####| ... |
3971 3971 * +==============================+
3972 3972 * 32 Gbytes
3973 3973 *
3974 3974 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3975 3975 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3976 3976 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3977 3977 * safe to say that this is an uncommon case, since buffers at the end of
3978 3978 * the ARC lists have moved there due to inactivity.
3979 3979 *
3980 3980 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3981 3981 * then the L2ARC simply misses copying some buffers. This serves as a
3982 3982 * pressure valve to prevent heavy read workloads from both stalling the ARC
3983 3983 * with waits and clogging the L2ARC with writes. This also helps prevent
3984 3984 * the potential for the L2ARC to churn if it attempts to cache content too
3985 3985 * quickly, such as during backups of the entire pool.
3986 3986 *
3987 3987 * 5. After system boot and before the ARC has filled main memory, there are
3988 3988 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3989 3989 * lists can remain mostly static. Instead of searching from tail of these
3990 3990 * lists as pictured, the l2arc_feed_thread() will search from the list heads
3991 3991 * for eligible buffers, greatly increasing its chance of finding them.
3992 3992 *
3993 3993 * The L2ARC device write speed is also boosted during this time so that
3994 3994 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3995 3995 * there are no L2ARC reads, and no fear of degrading read performance
3996 3996 * through increased writes.
3997 3997 *
3998 3998 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3999 3999 * the vdev queue can aggregate them into larger and fewer writes. Each
4000 4000 * device is written to in a rotor fashion, sweeping writes through
4001 4001 * available space then repeating.
4002 4002 *
4003 4003 * 7. The L2ARC does not store dirty content. It never needs to flush
4004 4004 * write buffers back to disk based storage.
4005 4005 *
4006 4006 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4007 4007 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4008 4008 *
4009 4009 * The performance of the L2ARC can be tweaked by a number of tunables, which
4010 4010 * may be necessary for different workloads:
4011 4011 *
4012 4012 * l2arc_write_max max write bytes per interval
4013 4013 * l2arc_write_boost extra write bytes during device warmup
4014 4014 * l2arc_noprefetch skip caching prefetched buffers
4015 4015 * l2arc_headroom number of max device writes to precache
4016 4016 * l2arc_headroom_boost when we find compressed buffers during ARC
4017 4017 * scanning, we multiply headroom by this
4018 4018 * percentage factor for the next scan cycle,
4019 4019 * since more compressed buffers are likely to
4020 4020 * be present
4021 4021 * l2arc_feed_secs seconds between L2ARC writing
4022 4022 *
4023 4023 * Tunables may be removed or added as future performance improvements are
4024 4024 * integrated, and also may become zpool properties.
4025 4025 *
4026 4026 * There are three key functions that control how the L2ARC warms up:
4027 4027 *
4028 4028 * l2arc_write_eligible() check if a buffer is eligible to cache
4029 4029 * l2arc_write_size() calculate how much to write
4030 4030 * l2arc_write_interval() calculate sleep delay between writes
4031 4031 *
4032 4032 * These three functions determine what to write, how much, and how quickly
4033 4033 * to send writes.
4034 4034 */
4035 4035
4036 4036 static boolean_t
4037 4037 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4038 4038 {
4039 4039 /*
4040 4040 * A buffer is *not* eligible for the L2ARC if it:
4041 4041 * 1. belongs to a different spa.
4042 4042 * 2. is already cached on the L2ARC.
4043 4043 * 3. has an I/O in progress (it may be an incomplete read).
4044 4044 * 4. is flagged not eligible (zfs property).
4045 4045 */
4046 4046 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
4047 4047 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
4048 4048 return (B_FALSE);
4049 4049
4050 4050 return (B_TRUE);
4051 4051 }
4052 4052
4053 4053 static uint64_t
4054 4054 l2arc_write_size(void)
4055 4055 {
4056 4056 uint64_t size;
4057 4057
4058 4058 /*
4059 4059 * Make sure our globals have meaningful values in case the user
4060 4060 * altered them.
4061 4061 */
4062 4062 size = l2arc_write_max;
4063 4063 if (size == 0) {
4064 4064 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4065 4065 "be greater than zero, resetting it to the default (%d)",
4066 4066 L2ARC_WRITE_SIZE);
4067 4067 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4068 4068 }
4069 4069
4070 4070 if (arc_warm == B_FALSE)
4071 4071 size += l2arc_write_boost;
4072 4072
4073 4073 return (size);
4074 4074
4075 4075 }
4076 4076
4077 4077 static clock_t
4078 4078 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4079 4079 {
4080 4080 clock_t interval, next, now;
4081 4081
4082 4082 /*
4083 4083 * If the ARC lists are busy, increase our write rate; if the
4084 4084 * lists are stale, idle back. This is achieved by checking
4085 4085 * how much we previously wrote - if it was more than half of
4086 4086 * what we wanted, schedule the next write much sooner.
4087 4087 */
4088 4088 if (l2arc_feed_again && wrote > (wanted / 2))
4089 4089 interval = (hz * l2arc_feed_min_ms) / 1000;
4090 4090 else
4091 4091 interval = hz * l2arc_feed_secs;
4092 4092
4093 4093 now = ddi_get_lbolt();
4094 4094 next = MAX(now, MIN(now + interval, began + interval));
4095 4095
4096 4096 return (next);
4097 4097 }
4098 4098
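/*
 * Hypothetical example of the scheduling above with the default tunables
 * (l2arc_feed_secs = 1, l2arc_feed_min_ms = 200): a busy pass that wrote
 * more than half of what it wanted is rescheduled ~200ms after it began,
 * while an idle pass waits a full second.  The helper is illustrative.
 */
static clock_t
example_feed_interval(boolean_t busy)
{
	return (busy ? (hz * 200) / 1000 : hz * 1);
}
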
4099 4099 static void
4100 4100 l2arc_hdr_stat_add(void)
4101 4101 {
4102 4102 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4103 4103 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4104 4104 }
4105 4105
4106 4106 static void
4107 4107 l2arc_hdr_stat_remove(void)
4108 4108 {
4109 4109 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4110 4110 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4111 4111 }
4112 4112
4113 4113 /*
4114 4114 * Cycle through L2ARC devices. This is how L2ARC load balances.
4115 4115 * If a device is returned, this also returns holding the spa config lock.
4116 4116 */
4117 4117 static l2arc_dev_t *
4118 4118 l2arc_dev_get_next(void)
4119 4119 {
4120 4120 l2arc_dev_t *first, *next = NULL;
4121 4121
4122 4122 /*
4123 4123 * Lock out the removal of spas (spa_namespace_lock), then removal
4124 4124 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
4125 4125 * both locks will be dropped and a spa config lock held instead.
4126 4126 */
4127 4127 mutex_enter(&spa_namespace_lock);
4128 4128 mutex_enter(&l2arc_dev_mtx);
4129 4129
4130 4130 /* if there are no vdevs, there is nothing to do */
4131 4131 if (l2arc_ndev == 0)
4132 4132 goto out;
4133 4133
4134 4134 first = NULL;
4135 4135 next = l2arc_dev_last;
4136 4136 do {
4137 4137 /* loop around the list looking for a non-faulted vdev */
4138 4138 if (next == NULL) {
4139 4139 next = list_head(l2arc_dev_list);
4140 4140 } else {
4141 4141 next = list_next(l2arc_dev_list, next);
4142 4142 if (next == NULL)
4143 4143 next = list_head(l2arc_dev_list);
4144 4144 }
4145 4145
4146 4146 /* if we have come back to the start, bail out */
4147 4147 if (first == NULL)
4148 4148 first = next;
4149 4149 else if (next == first)
4150 4150 break;
4151 4151
4152 4152 } while (vdev_is_dead(next->l2ad_vdev));
4153 4153
4154 4154 /* if we were unable to find any usable vdevs, return NULL */
4155 4155 if (vdev_is_dead(next->l2ad_vdev))
4156 4156 next = NULL;
4157 4157
4158 4158 l2arc_dev_last = next;
4159 4159
4160 4160 out:
4161 4161 mutex_exit(&l2arc_dev_mtx);
4162 4162
4163 4163 /*
4164 4164 * Grab the config lock to prevent the 'next' device from being
4165 4165 * removed while we are writing to it.
4166 4166 */
4167 4167 if (next != NULL)
4168 4168 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4169 4169 mutex_exit(&spa_namespace_lock);
4170 4170
4171 4171 return (next);
4172 4172 }
4173 4173
4174 4174 /*
4175 4175 * Free buffers that were tagged for destruction.
4176 4176 */
4177 4177 static void
4178 4178 l2arc_do_free_on_write()
4179 4179 {
4180 4180 list_t *buflist;
4181 4181 l2arc_data_free_t *df, *df_prev;
4182 4182
4183 4183 mutex_enter(&l2arc_free_on_write_mtx);
4184 4184 buflist = l2arc_free_on_write;
4185 4185
4186 4186 for (df = list_tail(buflist); df; df = df_prev) {
4187 4187 df_prev = list_prev(buflist, df);
4188 4188 ASSERT(df->l2df_data != NULL);
4189 4189 ASSERT(df->l2df_func != NULL);
4190 4190 df->l2df_func(df->l2df_data, df->l2df_size);
4191 4191 list_remove(buflist, df);
4192 4192 kmem_free(df, sizeof (l2arc_data_free_t));
4193 4193 }
4194 4194
4195 4195 mutex_exit(&l2arc_free_on_write_mtx);
4196 4196 }
4197 4197
4198 4198 /*
4199 4199 * A write to a cache device has completed. Update all headers to allow
4200 4200 * reads from these buffers to begin.
4201 4201 */
4202 4202 static void
4203 4203 l2arc_write_done(zio_t *zio)
4204 4204 {
4205 4205 l2arc_write_callback_t *cb;
4206 4206 l2arc_dev_t *dev;
4207 4207 list_t *buflist;
4208 4208 arc_buf_hdr_t *head, *ab, *ab_prev;
4209 4209 l2arc_buf_hdr_t *abl2;
4210 4210 kmutex_t *hash_lock;
4211 4211
4212 4212 cb = zio->io_private;
4213 4213 ASSERT(cb != NULL);
4214 4214 dev = cb->l2wcb_dev;
4215 4215 ASSERT(dev != NULL);
4216 4216 head = cb->l2wcb_head;
4217 4217 ASSERT(head != NULL);
4218 4218 buflist = dev->l2ad_buflist;
4219 4219 ASSERT(buflist != NULL);
4220 4220 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4221 4221 l2arc_write_callback_t *, cb);
4222 4222
4223 4223 if (zio->io_error != 0)
4224 4224 ARCSTAT_BUMP(arcstat_l2_writes_error);
4225 4225
4226 4226 mutex_enter(&l2arc_buflist_mtx);
4227 4227
4228 4228 /*
4229 4229 * All writes completed, or an error was hit.
4230 4230 */
4231 4231 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4232 4232 ab_prev = list_prev(buflist, ab);
4233 4233
4234 4234 hash_lock = HDR_LOCK(ab);
4235 4235 if (!mutex_tryenter(hash_lock)) {
4236 4236 /*
4237 4237 * This buffer misses out. It may be in a stage
4238 4238 * of eviction. Its ARC_L2_WRITING flag will be
4239 4239 * left set, denying reads to this buffer.
4240 4240 */
4241 4241 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4242 4242 continue;
4243 4243 }
4244 4244
4245 4245 abl2 = ab->b_l2hdr;
4246 4246
4247 4247 /*
4248 4248 * Release the temporary compressed buffer as soon as possible.
4249 4249 */
4250 4250 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4251 4251 l2arc_release_cdata_buf(ab);
4252 4252
4253 4253 if (zio->io_error != 0) {
4254 4254 /*
4255 4255 * Error - drop L2ARC entry.
4256 4256 */
4257 4257 list_remove(buflist, ab);
4258 4258 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4259 4259 ab->b_l2hdr = NULL;
4260 4260 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4261 4261 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4262 4262 }
4263 4263
4264 4264 /*
4265 4265 * Allow ARC to begin reads to this L2ARC entry.
4266 4266 */
4267 4267 ab->b_flags &= ~ARC_L2_WRITING;
4268 4268
4269 4269 mutex_exit(hash_lock);
4270 4270 }
4271 4271
4272 4272 atomic_inc_64(&l2arc_writes_done);
4273 4273 list_remove(buflist, head);
4274 4274 kmem_cache_free(hdr_cache, head);
4275 4275 mutex_exit(&l2arc_buflist_mtx);
4276 4276
4277 4277 l2arc_do_free_on_write();
4278 4278
4279 4279 kmem_free(cb, sizeof (l2arc_write_callback_t));
4280 4280 }
4281 4281
4282 4282 /*
4283 4283 * A read to a cache device completed. Validate buffer contents before
4284 4284 * handing over to the regular ARC routines.
4285 4285 */
4286 4286 static void
4287 4287 l2arc_read_done(zio_t *zio)
4288 4288 {
4289 4289 l2arc_read_callback_t *cb;
4290 4290 arc_buf_hdr_t *hdr;
4291 4291 arc_buf_t *buf;
4292 4292 kmutex_t *hash_lock;
4293 4293 int equal;
4294 4294
4295 4295 ASSERT(zio->io_vd != NULL);
4296 4296 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4297 4297
4298 4298 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4299 4299
4300 4300 cb = zio->io_private;
4301 4301 ASSERT(cb != NULL);
4302 4302 buf = cb->l2rcb_buf;
4303 4303 ASSERT(buf != NULL);
4304 4304
4305 4305 hash_lock = HDR_LOCK(buf->b_hdr);
4306 4306 mutex_enter(hash_lock);
4307 4307 hdr = buf->b_hdr;
4308 4308 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4309 4309
4310 4310 /*
4311 4311 * If the buffer was compressed, decompress it first.
4312 4312 */
4313 4313 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4314 4314 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4315 4315 ASSERT(zio->io_data != NULL);
4316 4316
4317 4317 /*
4318 4318 * Check this survived the L2ARC journey.
4319 4319 */
4320 4320 equal = arc_cksum_equal(buf);
4321 4321 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4322 4322 mutex_exit(hash_lock);
4323 4323 zio->io_private = buf;
4324 4324 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4325 4325 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4326 4326 arc_read_done(zio);
4327 4327 } else {
4328 4328 mutex_exit(hash_lock);
4329 4329 /*
4330 4330 * Buffer didn't survive caching. Increment stats and
4331 4331 * reissue to the original storage device.
4332 4332 */
4333 4333 if (zio->io_error != 0) {
4334 4334 ARCSTAT_BUMP(arcstat_l2_io_error);
4335 4335 } else {
4336 4336 zio->io_error = SET_ERROR(EIO);
4337 4337 }
4338 4338 if (!equal)
4339 4339 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4340 4340
4341 4341 /*
4342 4342 * If there's no waiter, issue an async i/o to the primary
4343 4343 * storage now. If there *is* a waiter, the caller must
4344 4344 * issue the i/o in a context where it's OK to block.
4345 4345 */
4346 4346 if (zio->io_waiter == NULL) {
4347 4347 zio_t *pio = zio_unique_parent(zio);
4348 4348
4349 4349 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4350 4350
4351 4351 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4352 4352 buf->b_data, zio->io_size, arc_read_done, buf,
4353 4353 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4354 4354 }
4355 4355 }
4356 4356
4357 4357 kmem_free(cb, sizeof (l2arc_read_callback_t));
4358 4358 }
4359 4359
4360 4360 /*
4361 4361 * This is the list priority from which the L2ARC will search for pages to
4362 4362 * cache. This is used within loops (0..3) to cycle through lists in the
4363 4363 * desired order. This order can have a significant effect on cache
4364 4364 * performance.
4365 4365 *
4366 4366 * Currently the metadata lists are hit first, MFU then MRU, followed by
4367 4367 * the data lists. This function returns a locked list, and also returns
4368 4368 * the lock pointer.
4369 4369 */
4370 4370 static list_t *
4371 4371 l2arc_list_locked(int list_num, kmutex_t **lock)
4372 4372 {
4373 4373 list_t *list = NULL;
4374 4374
4375 4375 ASSERT(list_num >= 0 && list_num <= 3);
4376 4376
4377 4377 switch (list_num) {
4378 4378 case 0:
4379 4379 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4380 4380 *lock = &arc_mfu->arcs_mtx;
4381 4381 break;
4382 4382 case 1:
4383 4383 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4384 4384 *lock = &arc_mru->arcs_mtx;
4385 4385 break;
4386 4386 case 2:
4387 4387 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4388 4388 *lock = &arc_mfu->arcs_mtx;
4389 4389 break;
4390 4390 case 3:
4391 4391 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4392 4392 *lock = &arc_mru->arcs_mtx;
4393 4393 break;
4394 4394 }
4395 4395
4396 4396 ASSERT(!(MUTEX_HELD(*lock)));
4397 4397 mutex_enter(*lock);
4398 4398 return (list);
4399 4399 }
4400 4400
4401 4401 /*
4402 4402 * Evict buffers from the device write hand to the distance specified in
4403 4403 * bytes. This distance may span populated buffers, or it may span nothing.
4404 4404 * This is clearing a region on the L2ARC device ready for writing.
4405 4405 * If the 'all' boolean is set, every buffer is evicted.
4406 4406 */
4407 4407 static void
4408 4408 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4409 4409 {
4410 4410 list_t *buflist;
4411 4411 l2arc_buf_hdr_t *abl2;
4412 4412 arc_buf_hdr_t *ab, *ab_prev;
4413 4413 kmutex_t *hash_lock;
4414 4414 uint64_t taddr;
4415 4415
4416 4416 buflist = dev->l2ad_buflist;
4417 4417
4418 4418 if (buflist == NULL)
4419 4419 return;
4420 4420
4421 4421 if (!all && dev->l2ad_first) {
4422 4422 /*
4423 4423 * This is the first sweep through the device. There is
4424 4424 * nothing to evict.
4425 4425 */
4426 4426 return;
4427 4427 }
4428 4428
4429 4429 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4430 4430 /*
4431 4431 * When nearing the end of the device, evict to the end
4432 4432 * before the device write hand jumps to the start.
4433 4433 */
4434 4434 taddr = dev->l2ad_end;
4435 4435 } else {
4436 4436 taddr = dev->l2ad_hand + distance;
4437 4437 }
4438 4438 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4439 4439 uint64_t, taddr, boolean_t, all);
4440 4440
4441 4441 top:
4442 4442 mutex_enter(&l2arc_buflist_mtx);
4443 4443 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4444 4444 ab_prev = list_prev(buflist, ab);
4445 4445
4446 4446 hash_lock = HDR_LOCK(ab);
4447 4447 if (!mutex_tryenter(hash_lock)) {
4448 4448 /*
4449 4449 * Missed the hash lock. Retry.
4450 4450 */
4451 4451 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4452 4452 mutex_exit(&l2arc_buflist_mtx);
4453 4453 mutex_enter(hash_lock);
4454 4454 mutex_exit(hash_lock);
4455 4455 goto top;
4456 4456 }
4457 4457
4458 4458 if (HDR_L2_WRITE_HEAD(ab)) {
4459 4459 /*
4460 4460 * We hit a write head node. Leave it for
4461 4461 * l2arc_write_done().
4462 4462 */
4463 4463 list_remove(buflist, ab);
4464 4464 mutex_exit(hash_lock);
4465 4465 continue;
4466 4466 }
4467 4467
4468 4468 if (!all && ab->b_l2hdr != NULL &&
4469 4469 (ab->b_l2hdr->b_daddr > taddr ||
4470 4470 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4471 4471 /*
4472 4472 * We've evicted to the target address,
4473 4473 * or the end of the device.
4474 4474 */
4475 4475 mutex_exit(hash_lock);
4476 4476 break;
4477 4477 }
4478 4478
4479 4479 if (HDR_FREE_IN_PROGRESS(ab)) {
4480 4480 /*
4481 4481 * Already on the path to destruction.
4482 4482 */
4483 4483 mutex_exit(hash_lock);
4484 4484 continue;
4485 4485 }
4486 4486
4487 4487 if (ab->b_state == arc_l2c_only) {
4488 4488 ASSERT(!HDR_L2_READING(ab));
4489 4489 /*
4490 4490 * This doesn't exist in the ARC. Destroy.
4491 4491 * arc_hdr_destroy() will call list_remove()
4492 4492 * and decrement arcstat_l2_size.
4493 4493 */
4494 4494 arc_change_state(arc_anon, ab, hash_lock);
4495 4495 arc_hdr_destroy(ab);
4496 4496 } else {
4497 4497 /*
4498 4498 * Invalidate issued or about to be issued
4499 4499 * reads, since we may be about to write
4500 4500 * over this location.
4501 4501 */
4502 4502 if (HDR_L2_READING(ab)) {
4503 4503 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4504 4504 ab->b_flags |= ARC_L2_EVICTED;
4505 4505 }
4506 4506
4507 4507 /*
4508 4508 * Tell ARC this no longer exists in L2ARC.
4509 4509 */
4510 4510 if (ab->b_l2hdr != NULL) {
4511 4511 abl2 = ab->b_l2hdr;
4512 4512 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4513 4513 ab->b_l2hdr = NULL;
4514 4514 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4515 4515 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4516 4516 }
4517 4517 list_remove(buflist, ab);
4518 4518
4519 4519 /*
4520 4520 * This may have been leftover after a
4521 4521 * failed write.
4522 4522 */
4523 4523 ab->b_flags &= ~ARC_L2_WRITING;
4524 4524 }
4525 4525 mutex_exit(hash_lock);
4526 4526 }
4527 4527 mutex_exit(&l2arc_buflist_mtx);
4528 4528
4529 4529 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4530 4530 dev->l2ad_evict = taddr;
4531 4531 }
4532 4532
4533 4533 /*
4534 4534 * Find and write ARC buffers to the L2ARC device.
4535 4535 *
4536 4536 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4537 4537 * for reading until they have completed writing.
4538 4538 * The headroom_boost is an in-out parameter used to maintain headroom boost
4539 4539 * state between calls to this function.
4540 4540 *
4541 4541 * Returns the number of bytes actually written (which may be smaller than
4542 4542 * the delta by which the device hand has changed due to alignment).
4543 4543 */
4544 4544 static uint64_t
4545 4545 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4546 4546 boolean_t *headroom_boost)
4547 4547 {
4548 4548 arc_buf_hdr_t *ab, *ab_prev, *head;
4549 4549 list_t *list;
4550 4550 uint64_t write_asize, write_psize, write_sz, headroom,
4551 4551 buf_compress_minsz;
4552 4552 void *buf_data;
4553 4553 kmutex_t *list_lock;
4554 4554 boolean_t full;
4555 4555 l2arc_write_callback_t *cb;
4556 4556 zio_t *pio, *wzio;
4557 4557 uint64_t guid = spa_load_guid(spa);
4558 4558 const boolean_t do_headroom_boost = *headroom_boost;
4559 4559
4560 4560 ASSERT(dev->l2ad_vdev != NULL);
4561 4561
4562 4562 /* Lower the flag now, we might want to raise it again later. */
4563 4563 *headroom_boost = B_FALSE;
4564 4564
4565 4565 pio = NULL;
4566 4566 write_sz = write_asize = write_psize = 0;
4567 4567 full = B_FALSE;
4568 4568 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4569 4569 head->b_flags |= ARC_L2_WRITE_HEAD;
4570 4570
4571 4571 /*
4572 4572 * We will want to try to compress buffers that are at least 2x the
4573 4573 * device sector size.
4574 4574 */
4575 4575 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4576 4576
4577 4577 /*
4578 4578 * Copy buffers for L2ARC writing.
4579 4579 */
4580 4580 mutex_enter(&l2arc_buflist_mtx);
4581 4581 for (int try = 0; try <= 3; try++) {
4582 4582 uint64_t passed_sz = 0;
4583 4583
4584 4584 list = l2arc_list_locked(try, &list_lock);
4585 4585
4586 4586 /*
4587 4587 * L2ARC fast warmup.
4588 4588 *
4589 4589 * Until the ARC is warm and starts to evict, read from the
4590 4590 * head of the ARC lists rather than the tail.
4591 4591 */
4592 4592 if (arc_warm == B_FALSE)
4593 4593 ab = list_head(list);
4594 4594 else
4595 4595 ab = list_tail(list);
4596 4596
4597 4597 headroom = target_sz * l2arc_headroom;
4598 4598 if (do_headroom_boost)
4599 4599 headroom = (headroom * l2arc_headroom_boost) / 100;
4600 4600
4601 4601 for (; ab; ab = ab_prev) {
4602 4602 l2arc_buf_hdr_t *l2hdr;
4603 4603 kmutex_t *hash_lock;
4604 4604 uint64_t buf_sz;
4605 4605
4606 4606 if (arc_warm == B_FALSE)
4607 4607 ab_prev = list_next(list, ab);
4608 4608 else
4609 4609 ab_prev = list_prev(list, ab);
4610 4610
4611 4611 hash_lock = HDR_LOCK(ab);
4612 4612 if (!mutex_tryenter(hash_lock)) {
4613 4613 /*
4614 4614 * Skip this buffer rather than waiting.
4615 4615 */
4616 4616 continue;
4617 4617 }
4618 4618
4619 4619 passed_sz += ab->b_size;
4620 4620 if (passed_sz > headroom) {
4621 4621 /*
4622 4622 * Searched too far.
4623 4623 */
4624 4624 mutex_exit(hash_lock);
4625 4625 break;
4626 4626 }
4627 4627
4628 4628 if (!l2arc_write_eligible(guid, ab)) {
4629 4629 mutex_exit(hash_lock);
4630 4630 continue;
4631 4631 }
4632 4632
4633 4633 if ((write_sz + ab->b_size) > target_sz) {
4634 4634 full = B_TRUE;
4635 4635 mutex_exit(hash_lock);
4636 4636 break;
4637 4637 }
4638 4638
4639 4639 if (pio == NULL) {
4640 4640 /*
4641 4641 * Insert a dummy header on the buflist so
4642 4642 * l2arc_write_done() can find where the
4643 4643 * write buffers begin without searching.
4644 4644 */
4645 4645 list_insert_head(dev->l2ad_buflist, head);
4646 4646
4647 4647 cb = kmem_alloc(
4648 4648 sizeof (l2arc_write_callback_t), KM_SLEEP);
4649 4649 cb->l2wcb_dev = dev;
4650 4650 cb->l2wcb_head = head;
4651 4651 pio = zio_root(spa, l2arc_write_done, cb,
4652 4652 ZIO_FLAG_CANFAIL);
4653 4653 }
4654 4654
4655 4655 /*
4656 4656 * Create and add a new L2ARC header.
4657 4657 */
4658 4658 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4659 4659 l2hdr->b_dev = dev;
4660 4660 ab->b_flags |= ARC_L2_WRITING;
4661 4661
4662 4662 /*
4663 4663 * Temporarily stash the data buffer in b_tmp_cdata.
4664 4664 * The subsequent write step will pick it up from
4665 4665 * there. This is because we can't access ab->b_buf
4666 4666 * without holding the hash_lock, which we in turn
4667 4667 * can't access without holding the ARC list locks
4668 4668 * (which we want to avoid during compression/writing).
4669 4669 */
4670 4670 l2hdr->b_compress = ZIO_COMPRESS_OFF;
4671 4671 l2hdr->b_asize = ab->b_size;
4672 4672 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
4673 4673
4674 4674 buf_sz = ab->b_size;
4675 4675 ab->b_l2hdr = l2hdr;
4676 4676
4677 4677 list_insert_head(dev->l2ad_buflist, ab);
4678 4678
4679 4679 /*
4680 4680 * Compute and store the buffer cksum before
4681 4681 * writing. On debug the cksum is verified first.
4682 4682 */
4683 4683 arc_cksum_verify(ab->b_buf);
4684 4684 arc_cksum_compute(ab->b_buf, B_TRUE);
4685 4685
4686 4686 mutex_exit(hash_lock);
4687 4687
4688 4688 write_sz += buf_sz;
4689 4689 }
4690 4690
4691 4691 mutex_exit(list_lock);
4692 4692
4693 4693 if (full == B_TRUE)
4694 4694 break;
4695 4695 }
4696 4696
4697 4697 /* No buffers selected for writing? */
4698 4698 if (pio == NULL) {
4699 4699 ASSERT0(write_sz);
4700 4700 mutex_exit(&l2arc_buflist_mtx);
4701 4701 kmem_cache_free(hdr_cache, head);
4702 4702 return (0);
4703 4703 }
4704 4704
4705 4705 /*
4706 4706 * Now start writing the buffers. We start at the write head
4707 4707 * and work backwards, retracing the course of the buffer selector
4708 4708 * loop above.
4709 4709 */
4710 4710 for (ab = list_prev(dev->l2ad_buflist, head); ab;
4711 4711 ab = list_prev(dev->l2ad_buflist, ab)) {
4712 4712 l2arc_buf_hdr_t *l2hdr;
4713 4713 uint64_t buf_sz;
4714 4714
4715 4715 /*
4716 4716 * We shouldn't need to lock the buffer here, since we flagged
4717 4717 * it as ARC_L2_WRITING in the previous step, but we must take
4718 4718 * care to only access its L2 cache parameters. In particular,
4719 4719 * ab->b_buf may be invalid by now due to ARC eviction.
4720 4720 */
4721 4721 l2hdr = ab->b_l2hdr;
4722 4722 l2hdr->b_daddr = dev->l2ad_hand;
4723 4723
4724 4724 if ((ab->b_flags & ARC_L2COMPRESS) &&
4725 4725 l2hdr->b_asize >= buf_compress_minsz) {
4726 4726 if (l2arc_compress_buf(l2hdr)) {
4727 4727 /*
4728 4728 * If compression succeeded, enable headroom
4729 4729 * boost on the next scan cycle.
4730 4730 */
4731 4731 *headroom_boost = B_TRUE;
4732 4732 }
4733 4733 }
4734 4734
4735 4735 /*
4736 4736 * Pick up the buffer data we had previously stashed away
4737 4737 * (and now potentially also compressed).
4738 4738 */
4739 4739 buf_data = l2hdr->b_tmp_cdata;
4740 4740 buf_sz = l2hdr->b_asize;
4741 4741
4742 4742 /* Compression may have squashed the buffer to zero length. */
4743 4743 if (buf_sz != 0) {
4744 4744 uint64_t buf_p_sz;
4745 4745
4746 4746 wzio = zio_write_phys(pio, dev->l2ad_vdev,
4747 4747 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4748 4748 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4749 4749 ZIO_FLAG_CANFAIL, B_FALSE);
4750 4750
4751 4751 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4752 4752 zio_t *, wzio);
4753 4753 (void) zio_nowait(wzio);
4754 4754
4755 4755 write_asize += buf_sz;
4756 4756 /*
4757 4757 * Keep the clock hand suitably device-aligned.
4758 4758 */
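			/*
			 * For example, on a cache device with ashift == 12
			 * (4K allocation units, an assumption for illustration
			 * only), a 2560-byte compressed buffer is rounded up
			 * by vdev_psize_to_asize() to 4096 bytes, and the
			 * write hand advances by that aligned size.
			 */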
4759 4759 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4760 4760 write_psize += buf_p_sz;
4761 4761 dev->l2ad_hand += buf_p_sz;
4762 4762 }
4763 4763 }
4764 4764
4765 4765 mutex_exit(&l2arc_buflist_mtx);
4766 4766
4767 4767 ASSERT3U(write_asize, <=, target_sz);
4768 4768 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4769 4769 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
4770 4770 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4771 4771 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
4772 4772 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
4773 4773
4774 4774 /*
4775 4775 * Bump device hand to the device start if it is approaching the end.
4776 4776 * l2arc_evict() will already have evicted ahead for this case.
4777 4777 */
4778 4778 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4779 4779 vdev_space_update(dev->l2ad_vdev,
4780 4780 dev->l2ad_end - dev->l2ad_hand, 0, 0);
4781 4781 dev->l2ad_hand = dev->l2ad_start;
4782 4782 dev->l2ad_evict = dev->l2ad_start;
4783 4783 dev->l2ad_first = B_FALSE;
4784 4784 }
4785 4785
4786 4786 dev->l2ad_writing = B_TRUE;
4787 4787 (void) zio_wait(pio);
4788 4788 dev->l2ad_writing = B_FALSE;
4789 4789
4790 4790 return (write_asize);
4791 4791 }
4792 4792
4793 4793 /*
4794 4794 * Compresses an L2ARC buffer.
4795 4795 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
4796 4796 * size in l2hdr->b_asize. This routine tries to compress the data and
4797 4797 * depending on the compression result there are three possible outcomes:
4798 4798 * *) The buffer was incompressible. The original l2hdr contents were left
4799 4799 * untouched and are ready for writing to an L2 device.
4800 4800 * *) The buffer was all-zeros, so there is no need to write it to an L2
4801 4801 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
4802 4802 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
4803 4803 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
4804 4804 * data buffer which holds the compressed data to be written, and b_asize
4805 4805 * tells us how much data there is. b_compress is set to the appropriate
4806 4806 * compression algorithm. Once writing is done, invoke
4807 4807 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
4808 4808 *
4809 4809 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
4810 4810 * buffer was incompressible).
4811 4811 */
4812 4812 static boolean_t
4813 4813 l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
4814 4814 {
4815 4815 void *cdata;
4816 4816 size_t csize, len;
4817 4817
4818 4818 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
4819 4819 ASSERT(l2hdr->b_tmp_cdata != NULL);
4820 4820
4821 4821 len = l2hdr->b_asize;
4822 4822 cdata = zio_data_buf_alloc(len);
4823 4823 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
4824 4824 cdata, l2hdr->b_asize);
4825 4825
4826 4826 if (csize == 0) {
4827 4827 /* zero block, indicate that there's nothing to write */
4828 4828 zio_data_buf_free(cdata, len);
4829 4829 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
4830 4830 l2hdr->b_asize = 0;
4831 4831 l2hdr->b_tmp_cdata = NULL;
4832 4832 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
4833 4833 return (B_TRUE);
4834 4834 } else if (csize > 0 && csize < len) {
4835 4835 /*
4836 4836 * Compression succeeded, we'll keep the cdata around for
4837 4837 * writing and release it afterwards.
4838 4838 */
4839 4839 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
4840 4840 l2hdr->b_asize = csize;
4841 4841 l2hdr->b_tmp_cdata = cdata;
4842 4842 ARCSTAT_BUMP(arcstat_l2_compress_successes);
4843 4843 return (B_TRUE);
4844 4844 } else {
4845 4845 /*
4846 4846 * Compression failed, release the compressed buffer.
4847 4847 * l2hdr will be left unmodified.
4848 4848 */
4849 4849 zio_data_buf_free(cdata, len);
4850 4850 ARCSTAT_BUMP(arcstat_l2_compress_failures);
4851 4851 return (B_FALSE);
4852 4852 }
4853 4853 }
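
/*
 * A minimal sketch (using only names defined in this file) of how a caller
 * distinguishes the three outcomes documented above:
 *
 *	if (l2hdr->b_compress == ZIO_COMPRESS_EMPTY)
 *		b_asize is 0 and nothing needs to be written;
 *	else if (l2hdr->b_compress == ZIO_COMPRESS_LZ4)
 *		write b_asize bytes from b_tmp_cdata, then free the temporary
 *		copy with l2arc_release_cdata_buf();
 *	else (b_compress remains ZIO_COMPRESS_OFF)
 *		the buffer was incompressible, so write the original data.
 */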
4854 4854
4855 4855 /*
4856 4856 * Decompresses a zio read back from an l2arc device. On success, the
4857 4857 * underlying zio's io_data buffer is overwritten by the uncompressed
4858 4858 * version. On decompression error (corrupt compressed stream), the
4859 4859 * zio->io_error value is set to signal an I/O error.
4860 4860 *
4861 4861 * Please note that the compressed data stream is not checksummed, so
4862 4862 * if the underlying device is experiencing data corruption, we may feed
4863 4863 * corrupt data to the decompressor; the decompressor therefore needs to
4864 4864 * be able to handle this situation (LZ4 does).
4865 4865 */
4866 4866 static void
4867 4867 l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
4868 4868 {
4869 4869 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
4870 4870
4871 4871 if (zio->io_error != 0) {
4872 4872 /*
4873 4873 * An io error has occurred; just restore the original io
4874 4874 * size in preparation for a main pool read.
4875 4875 */
4876 4876 zio->io_orig_size = zio->io_size = hdr->b_size;
4877 4877 return;
4878 4878 }
4879 4879
4880 4880 if (c == ZIO_COMPRESS_EMPTY) {
4881 4881 /*
4882 4882 * An empty buffer results in a null zio, which means we
4883 4883 * need to fill its io_data after we're done restoring the
4884 4884 * buffer's contents.
4885 4885 */
4886 4886 ASSERT(hdr->b_buf != NULL);
4887 4887 bzero(hdr->b_buf->b_data, hdr->b_size);
4888 4888 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
4889 4889 } else {
4890 4890 ASSERT(zio->io_data != NULL);
4891 4891 /*
4892 4892 * We copy the compressed data from the start of the arc buffer
4893 4893 * (the zio_read will have pulled in only what we need, the
4894 4894 * rest is garbage which we will overwrite at decompression)
4895 4895 * and then decompress back to the ARC data buffer. This way we
4896 4896 * can minimize copying by simply decompressing back over the
4897 4897 * original compressed data (rather than decompressing to an
4898 4898 * aux buffer and then copying back the uncompressed buffer,
4899 4899 * which is likely to be much larger).
4900 4900 */
4901 4901 uint64_t csize;
4902 4902 void *cdata;
4903 4903
4904 4904 csize = zio->io_size;
4905 4905 cdata = zio_data_buf_alloc(csize);
4906 4906 bcopy(zio->io_data, cdata, csize);
4907 4907 if (zio_decompress_data(c, cdata, zio->io_data, csize,
4908 4908 hdr->b_size) != 0)
4909 4909 zio->io_error = EIO;
4910 4910 zio_data_buf_free(cdata, csize);
4911 4911 }
4912 4912
4913 4913 /* Restore the expected uncompressed IO size. */
4914 4914 zio->io_orig_size = zio->io_size = hdr->b_size;
4915 4915 }
4916 4916
4917 4917 /*
4918 4918 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
4919 4919 * This buffer serves as a temporary holder of compressed data while
4920 4920 * the buffer entry is being written to an l2arc device. Once that is
4921 4921 * done, we can dispose of it.
4922 4922 */
4923 4923 static void
4924 4924 l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
4925 4925 {
4926 4926 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
4927 4927
4928 4928 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
4929 4929 /*
4930 4930 * If the data was compressed, then we've allocated a
4931 4931 * temporary buffer for it, so now we need to release it.
4932 4932 */
4933 4933 ASSERT(l2hdr->b_tmp_cdata != NULL);
4934 4934 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
4935 4935 }
4936 4936 l2hdr->b_tmp_cdata = NULL;
4937 4937 }
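
/*
 * To summarize the b_tmp_cdata lifecycle described in the comments above:
 * l2arc_write_buffers() stashes the ARC data buffer in b_tmp_cdata while
 * holding the hash lock, l2arc_compress_buf() may replace it with a
 * compressed copy, zio_write_phys() then writes it to the cache device,
 * and once writing is done l2arc_release_cdata_buf() frees any temporary
 * compressed copy.
 */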
4938 4938
4939 4939 /*
4940 4940 * This thread feeds the L2ARC at regular intervals. This is the beating
4941 4941 * heart of the L2ARC.
4942 4942 */
4943 4943 static void
4944 4944 l2arc_feed_thread(void)
4945 4945 {
4946 4946 callb_cpr_t cpr;
4947 4947 l2arc_dev_t *dev;
4948 4948 spa_t *spa;
4949 4949 uint64_t size, wrote;
4950 4950 clock_t begin, next = ddi_get_lbolt();
4951 4951 boolean_t headroom_boost = B_FALSE;
4952 4952
4953 4953 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4954 4954
4955 4955 mutex_enter(&l2arc_feed_thr_lock);
4956 4956
4957 4957 while (l2arc_thread_exit == 0) {
4958 4958 CALLB_CPR_SAFE_BEGIN(&cpr);
4959 4959 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4960 4960 next);
4961 4961 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4962 4962 next = ddi_get_lbolt() + hz;
4963 4963
4964 4964 /*
4965 4965 * Quick check for L2ARC devices.
4966 4966 */
4967 4967 mutex_enter(&l2arc_dev_mtx);
4968 4968 if (l2arc_ndev == 0) {
4969 4969 mutex_exit(&l2arc_dev_mtx);
4970 4970 continue;
4971 4971 }
4972 4972 mutex_exit(&l2arc_dev_mtx);
4973 4973 begin = ddi_get_lbolt();
4974 4974
4975 4975 /*
4976 4976 * This selects the next l2arc device to write to, and in
4977 4977 * doing so the next spa to feed from: dev->l2ad_spa. This
4978 4978 * will return NULL if there are now no l2arc devices or if
4979 4979 * they are all faulted.
4980 4980 *
4981 4981 * If a device is returned, its spa's config lock is also
4982 4982 * held to prevent device removal. l2arc_dev_get_next()
4983 4983 * will grab and release l2arc_dev_mtx.
4984 4984 */
4985 4985 if ((dev = l2arc_dev_get_next()) == NULL)
4986 4986 continue;
4987 4987
4988 4988 spa = dev->l2ad_spa;
4989 4989 ASSERT(spa != NULL);
4990 4990
4991 4991 /*
4992 4992 * If the pool is read-only then force the feed thread to
4993 4993 * sleep a little longer.
4994 4994 */
4995 4995 if (!spa_writeable(spa)) {
4996 4996 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
4997 4997 spa_config_exit(spa, SCL_L2ARC, dev);
4998 4998 continue;
4999 4999 }
5000 5000
5001 5001 /*
5002 5002 * Avoid contributing to memory pressure.
5003 5003 */
5004 5004 if (arc_reclaim_needed()) {
5005 5005 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5006 5006 spa_config_exit(spa, SCL_L2ARC, dev);
5007 5007 continue;
5008 5008 }
5009 5009
5010 5010 ARCSTAT_BUMP(arcstat_l2_feeds);
5011 5011
5012 5012 size = l2arc_write_size();
5013 5013
5014 5014 /*
5015 5015 * Evict L2ARC buffers that will be overwritten.
5016 5016 */
5017 5017 l2arc_evict(dev, size, B_FALSE);
5018 5018
5019 5019 /*
5020 5020 * Write ARC buffers.
5021 5021 */
5022 5022 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5023 5023
5024 5024 /*
5025 5025 * Calculate interval between writes.
5026 5026 */
5027 5027 next = l2arc_write_interval(begin, size, wrote);
5028 5028 spa_config_exit(spa, SCL_L2ARC, dev);
5029 5029 }
5030 5030
5031 5031 l2arc_thread_exit = 0;
5032 5032 cv_broadcast(&l2arc_feed_thr_cv);
5033 5033 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
5034 5034 thread_exit();
5035 5035 }
5036 5036
5037 5037 boolean_t
5038 5038 l2arc_vdev_present(vdev_t *vd)
5039 5039 {
5040 5040 l2arc_dev_t *dev;
5041 5041
5042 5042 mutex_enter(&l2arc_dev_mtx);
5043 5043 for (dev = list_head(l2arc_dev_list); dev != NULL;
5044 5044 dev = list_next(l2arc_dev_list, dev)) {
5045 5045 if (dev->l2ad_vdev == vd)
5046 5046 break;
5047 5047 }
5048 5048 mutex_exit(&l2arc_dev_mtx);
5049 5049
5050 5050 return (dev != NULL);
5051 5051 }
5052 5052
5053 5053 /*
5054 5054 * Add a vdev for use by the L2ARC. By this point the spa has already
5055 5055 * validated the vdev and opened it.
5056 5056 */
5057 5057 void
5058 5058 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5059 5059 {
5060 5060 l2arc_dev_t *adddev;
5061 5061
5062 5062 ASSERT(!l2arc_vdev_present(vd));
5063 5063
5064 5064 /*
5065 5065 * Create a new l2arc device entry.
5066 5066 */
5067 5067 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5068 5068 adddev->l2ad_spa = spa;
5069 5069 adddev->l2ad_vdev = vd;
5070 5070 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5071 5071 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5072 5072 adddev->l2ad_hand = adddev->l2ad_start;
5073 5073 adddev->l2ad_evict = adddev->l2ad_start;
5074 5074 adddev->l2ad_first = B_TRUE;
5075 5075 adddev->l2ad_writing = B_FALSE;
5076 5076
5077 5077 /*
5078 5078 * This is a list of all ARC buffers that are still valid on the
5079 5079 * device.
5080 5080 */
5081 5081 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5082 5082 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5083 5083 offsetof(arc_buf_hdr_t, b_l2node));
5084 5084
5085 5085 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5086 5086
5087 5087 /*
5088 5088 * Add device to global list
5089 5089 */
5090 5090 mutex_enter(&l2arc_dev_mtx);
5091 5091 list_insert_head(l2arc_dev_list, adddev);
5092 5092 atomic_inc_64(&l2arc_ndev);
5093 5093 mutex_exit(&l2arc_dev_mtx);
5094 5094 }
5095 5095
5096 5096 /*
5097 5097 * Remove a vdev from the L2ARC.
5098 5098 */
5099 5099 void
5100 5100 l2arc_remove_vdev(vdev_t *vd)
5101 5101 {
5102 5102 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5103 5103
5104 5104 /*
5105 5105 * Find the device by vdev
5106 5106 */
5107 5107 mutex_enter(&l2arc_dev_mtx);
5108 5108 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5109 5109 nextdev = list_next(l2arc_dev_list, dev);
5110 5110 if (vd == dev->l2ad_vdev) {
5111 5111 remdev = dev;
5112 5112 break;
5113 5113 }
5114 5114 }
5115 5115 ASSERT(remdev != NULL);
5116 5116
5117 5117 /*
5118 5118 * Remove device from global list
5119 5119 */
5120 5120 list_remove(l2arc_dev_list, remdev);
5121 5121 l2arc_dev_last = NULL; /* may have been invalidated */
5122 5122 atomic_dec_64(&l2arc_ndev);
5123 5123 mutex_exit(&l2arc_dev_mtx);
5124 5124
5125 5125 /*
5126 5126 * Clear all buflists and ARC references. L2ARC device flush.
5127 5127 */
5128 5128 l2arc_evict(remdev, 0, B_TRUE);
5129 5129 list_destroy(remdev->l2ad_buflist);
5130 5130 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5131 5131 kmem_free(remdev, sizeof (l2arc_dev_t));
5132 5132 }
5133 5133
5134 5134 void
5135 5135 l2arc_init(void)
5136 5136 {
5137 5137 l2arc_thread_exit = 0;
5138 5138 l2arc_ndev = 0;
5139 5139 l2arc_writes_sent = 0;
5140 5140 l2arc_writes_done = 0;
5141 5141
5142 5142 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5143 5143 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5144 5144 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5145 5145 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5146 5146 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5147 5147
5148 5148 l2arc_dev_list = &L2ARC_dev_list;
5149 5149 l2arc_free_on_write = &L2ARC_free_on_write;
5150 5150 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5151 5151 offsetof(l2arc_dev_t, l2ad_node));
5152 5152 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5153 5153 offsetof(l2arc_data_free_t, l2df_list_node));
5154 5154 }
5155 5155
5156 5156 void
5157 5157 l2arc_fini(void)
5158 5158 {
5159 5159 /*
5160 5160 * This is called from dmu_fini(), which is called from spa_fini();
5161 5161 * because of this, we can assume that all l2arc devices have
5162 5162 * already been removed when the pools themselves were removed.
5163 5163 */
5164 5164
5165 5165 l2arc_do_free_on_write();
5166 5166
5167 5167 mutex_destroy(&l2arc_feed_thr_lock);
5168 5168 cv_destroy(&l2arc_feed_thr_cv);
5169 5169 mutex_destroy(&l2arc_dev_mtx);
5170 5170 mutex_destroy(&l2arc_buflist_mtx);
5171 5171 mutex_destroy(&l2arc_free_on_write_mtx);
5172 5172
5173 5173 list_destroy(l2arc_dev_list);
5174 5174 list_destroy(l2arc_free_on_write);
5175 5175 }
5176 5176
5177 5177 void
5178 5178 l2arc_start(void)
5179 5179 {
5180 5180 if (!(spa_mode_global & FWRITE))
5181 5181 return;
5182 5182
5183 5183 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5184 5184 TS_RUN, minclsyspri);
5185 5185 }
5186 5186
5187 5187 void
5188 5188 l2arc_stop(void)
5189 5189 {
5190 5190 if (!(spa_mode_global & FWRITE))
5191 5191 return;
5192 5192
5193 5193 mutex_enter(&l2arc_feed_thr_lock);
5194 5194 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
5195 5195 l2arc_thread_exit = 1;
5196 5196 while (l2arc_thread_exit != 0)
5197 5197 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5198 5198 mutex_exit(&l2arc_feed_thr_lock);
5199 5199 }
4174 lines elided