arc_get_data_buf should be more aggressive in eviction when memory is unavailable
--- old/usr/src/uts/common/fs/zfs/arc.c
+++ new/usr/src/uts/common/fs/zfs/arc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
24 24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 25 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
26 26 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
27 27 */
28 28
29 29 /*
30 30 * DVA-based Adjustable Replacement Cache
31 31 *
32 32 * While much of the theory of operation used here is
33 33 * based on the self-tuning, low overhead replacement cache
34 34 * presented by Megiddo and Modha at FAST 2003, there are some
35 35 * significant differences:
36 36 *
37 37 * 1. The Megiddo and Modha model assumes any page is evictable.
38 38 * Pages in its cache cannot be "locked" into memory. This makes
39 39 * the eviction algorithm simple: evict the last page in the list.
40 40 * This also makes the performance characteristics easy to reason
41 41 * about. Our cache is not so simple. At any given moment, some
42 42 * subset of the blocks in the cache are un-evictable because we
43 43 * have handed out a reference to them. Blocks are only evictable
44 44 * when there are no external references active. This makes
45 45 * eviction far more problematic: we choose to evict the evictable
46 46 * blocks that are the "lowest" in the list.
47 47 *
48 48 * There are times when it is not possible to evict the requested
49 49 * space. In these circumstances we are unable to adjust the cache
50 50 * size. To prevent the cache from growing unbounded at these times, we
51 51 * implement a "cache throttle" that slows the flow of new data
52 52 * into the cache until we can make space available.
53 53 *
54 54 * 2. The Megiddo and Modha model assumes a fixed cache size.
55 55 * Pages are evicted when the cache is full and there is a cache
56 56 * miss. Our model has a variable sized cache. It grows with
57 57 * high use, but also tries to react to memory pressure from the
58 58 * operating system: decreasing its size when system memory is
59 59 * tight.
60 60 *
61 61 * 3. The Megiddo and Modha model assumes a fixed page size. All
62 62 * elements of the cache are therefore exactly the same size. So
63 63 * when adjusting the cache size following a cache miss, it's simply
64 64 * a matter of choosing a single page to evict. In our model, we
65 65 * have variable-sized cache blocks (ranging from 512 bytes to
66 66 * 128K bytes). We therefore choose a set of blocks to evict to make
67 67 * space for a cache miss that approximates as closely as possible
68 68 * the space used by the new block.
69 69 *
70 70 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
71 71 * by N. Megiddo & D. Modha, FAST 2003
72 72 */
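
To make difference 3 concrete, here is a minimal editor-supplied sketch (not part of arc.c; the real logic lives in the arc_evict_*() functions referenced later in this file) of walking a list of variable-sized blocks from its cold end until roughly the requested space has been reclaimed:

static uint64_t
evict_until(list_t *list, uint64_t needed)
{
	uint64_t freed = 0;
	arc_buf_hdr_t *ab;

	while (freed < needed && (ab = list_tail(list)) != NULL) {
		list_remove(list, ab);	/* drop the coldest evictable block */
		freed += ab->b_size;	/* blocks range from 512 bytes to 128K */
	}
	return (freed);
}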
73 73
74 74 /*
75 75 * The locking model:
76 76 *
77 77 * A new reference to a cache buffer can be obtained in two
78 78 * ways: 1) via a hash table lookup using the DVA as a key,
79 79 * or 2) via one of the ARC lists. The arc_read() interface
80 80 * uses method 1, while the internal arc algorithms for
81 81 * adjusting the cache use method 2. We therefore provide two
82 82 * types of locks: 1) the hash table lock array, and 2) the
83 83 * arc list locks.
84 84 *
85 85 * Buffers do not have their own mutexes, rather they rely on the
86 86 * hash table mutexes for the bulk of their protection (i.e. most
87 87 * fields in the arc_buf_hdr_t are protected by these mutexes).
88 88 *
89 89 * buf_hash_find() returns the appropriate mutex (held) when it
90 90 * locates the requested buffer in the hash table. It returns
91 91 * NULL for the mutex if the buffer was not in the table.
92 92 *
93 93 * buf_hash_remove() expects the appropriate hash mutex to be
94 94 * already held before it is invoked.
95 95 *
96 96 * Each arc state also has a mutex which is used to protect the
97 97 * buffer list associated with the state. When attempting to
98 98 * obtain a hash table lock while holding an arc list lock you
99 99 * must use mutex_tryenter() to avoid deadlock. Also note that
100 100 * the active state mutex must be held before the ghost state mutex.
101 101 *
102 102 * Arc buffers may have an associated eviction callback function.
103 103 * This function will be invoked prior to removing the buffer (e.g.
104 104 * in arc_do_user_evicts()). Note however that the data associated
105 105 * with the buffer may be evicted prior to the callback. The callback
106 106 * must be made with *no locks held* (to prevent deadlock). Additionally,
107 107 * the users of callbacks must ensure that their private data is
108 108 * protected from simultaneous callbacks from arc_clear_callback()
109 109 * and arc_do_user_evicts().
110 110 *
111 111 * Note that the majority of the performance stats are manipulated
112 112 * with atomic operations.
113 113 *
114 114 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
115 115 *
116 116 * - L2ARC buflist creation
117 117 * - L2ARC buflist eviction
118 118 * - L2ARC write completion, which walks L2ARC buflists
119 119 * - ARC header destruction, as it removes from L2ARC buflists
120 120 * - ARC header release, as it removes from L2ARC buflists
121 121 */
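
As an editor's illustration of the tryenter rule above (the real behavior is in the eviction code, which is not in this hunk), a helper like the following would attempt the hash lock and record a miss when it is already held; the helper's name and placement are hypothetical:

static boolean_t
try_hash_lock(kmutex_t *hash_lock)
{
	if (!mutex_tryenter(hash_lock)) {
		ARCSTAT_BUMP(arcstat_mutex_miss);	/* lock busy; skip this buffer */
		return (B_FALSE);
	}
	return (B_TRUE);	/* caller is responsible for mutex_exit() */
}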
122 122
123 123 #include <sys/spa.h>
124 124 #include <sys/zio.h>
125 125 #include <sys/zio_compress.h>
126 126 #include <sys/zfs_context.h>
127 127 #include <sys/arc.h>
128 128 #include <sys/refcount.h>
129 129 #include <sys/vdev.h>
130 130 #include <sys/vdev_impl.h>
131 131 #include <sys/dsl_pool.h>
132 132 #ifdef _KERNEL
133 133 #include <sys/vmsystm.h>
134 134 #include <vm/anon.h>
135 135 #include <sys/fs/swapnode.h>
136 136 #include <sys/dnlc.h>
137 137 #endif
138 138 #include <sys/callb.h>
139 139 #include <sys/kstat.h>
140 140 #include <zfs_fletcher.h>
141 141
142 142 #ifndef _KERNEL
143 143 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
144 144 boolean_t arc_watch = B_FALSE;
145 145 int arc_procfd;
146 146 #endif
147 147
148 148 static kmutex_t arc_reclaim_thr_lock;
149 149 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
150 150 static uint8_t arc_thread_exit;
151 151
152 152 #define ARC_REDUCE_DNLC_PERCENT 3
153 153 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
154 154
155 155 typedef enum arc_reclaim_strategy {
156 156 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
157 157 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
158 158 } arc_reclaim_strategy_t;
159 159
160 160 /*
161 161 * The number of iterations through arc_evict_*() before we
162 162 * drop & reacquire the lock.
163 163 */
164 164 int arc_evict_iterations = 100;
165 165
166 166 /* number of seconds before growing cache again */
167 167 static int arc_grow_retry = 60;
168 168
169 169 /* shift of arc_c for calculating both min and max arc_p */
170 170 static int arc_p_min_shift = 4;
171 171
172 172 /* log2(fraction of arc to reclaim) */
173 173 static int arc_shrink_shift = 5;
174 174
175 175 /*
176 176 * minimum lifespan of a prefetch block in clock ticks
177 177 * (initialized in arc_init())
178 178 */
179 179 static int arc_min_prefetch_lifespan;
180 180
181 181 /*
182 182 * If this percent of memory is free, don't throttle.
183 183 */
184 184 int arc_lotsfree_percent = 10;
185 185
186 186 static int arc_dead;
187 187
188 188 /*
189 189 * The arc has filled available memory and has now warmed up.
190 190 */
191 191 static boolean_t arc_warm;
192 192
193 193 /*
194 194 * These tunables are for performance analysis.
195 195 */
196 196 uint64_t zfs_arc_max;
197 197 uint64_t zfs_arc_min;
198 198 uint64_t zfs_arc_meta_limit = 0;
199 199 int zfs_arc_grow_retry = 0;
200 200 int zfs_arc_shrink_shift = 0;
201 201 int zfs_arc_p_min_shift = 0;
202 202 int zfs_disable_dup_eviction = 0;
203 203 int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
204 204
205 205 /*
206 206 * Note that buffers can be in one of 6 states:
207 207 * ARC_anon - anonymous (discussed below)
208 208 * ARC_mru - recently used, currently cached
209 209 * ARC_mru_ghost - recently used, no longer in cache
210 210 * ARC_mfu - frequently used, currently cached
211 211 * ARC_mfu_ghost - frequently used, no longer in cache
212 212 * ARC_l2c_only - exists in L2ARC but not other states
213 213 * When there are no active references to the buffer, it is
214 214 * linked onto a list in one of these arc states. These are
215 215 * the only buffers that can be evicted or deleted. Within each
216 216 * state there are multiple lists, one for meta-data and one for
217 217 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
218 218 * etc.) is tracked separately so that it can be managed more
219 219 * explicitly: favored over data, limited explicitly.
220 220 *
221 221 * Anonymous buffers are buffers that are not associated with
222 222 * a DVA. These are buffers that hold dirty block copies
223 223 * before they are written to stable storage. By definition,
224 224 * they are "ref'd" and are considered part of arc_mru
225 225 * that cannot be freed. Generally, they will acquire a DVA
226 226 * as they are written and migrate onto the arc_mru list.
227 227 *
228 228 * The ARC_l2c_only state is for buffers that are in the second
229 229 * level ARC but no longer in any of the ARC_m* lists. The second
230 230 * level ARC itself may also contain buffers that are in any of
231 231 * the ARC_m* states - meaning that a buffer can exist in two
232 232 * places. The reason for the ARC_l2c_only state is to keep the
233 233 * buffer header in the hash table, so that reads that hit the
234 234 * second level ARC benefit from these fast lookups.
235 235 */
236 236
237 237 typedef struct arc_state {
238 238 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */
239 239 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
240 240 uint64_t arcs_size; /* total amount of data in this state */
241 241 kmutex_t arcs_mtx;
242 242 } arc_state_t;
243 243
244 244 /* The 6 states: */
245 245 static arc_state_t ARC_anon;
246 246 static arc_state_t ARC_mru;
247 247 static arc_state_t ARC_mru_ghost;
248 248 static arc_state_t ARC_mfu;
249 249 static arc_state_t ARC_mfu_ghost;
250 250 static arc_state_t ARC_l2c_only;
251 251
252 252 typedef struct arc_stats {
253 253 kstat_named_t arcstat_hits;
254 254 kstat_named_t arcstat_misses;
255 255 kstat_named_t arcstat_demand_data_hits;
256 256 kstat_named_t arcstat_demand_data_misses;
257 257 kstat_named_t arcstat_demand_metadata_hits;
258 258 kstat_named_t arcstat_demand_metadata_misses;
259 259 kstat_named_t arcstat_prefetch_data_hits;
260 260 kstat_named_t arcstat_prefetch_data_misses;
261 261 kstat_named_t arcstat_prefetch_metadata_hits;
262 262 kstat_named_t arcstat_prefetch_metadata_misses;
263 263 kstat_named_t arcstat_mru_hits;
264 264 kstat_named_t arcstat_mru_ghost_hits;
265 265 kstat_named_t arcstat_mfu_hits;
266 266 kstat_named_t arcstat_mfu_ghost_hits;
267 267 kstat_named_t arcstat_deleted;
268 268 kstat_named_t arcstat_recycle_miss;
269 269 /*
270 270 * Number of buffers that could not be evicted because the hash lock
271 271 * was held by another thread. The lock may not necessarily be held
272 272 * by something using the same buffer, since hash locks are shared
273 273 * by multiple buffers.
274 274 */
275 275 kstat_named_t arcstat_mutex_miss;
276 276 /*
277 277 * Number of buffers skipped because they have I/O in progress, are
278 278 * indirect prefetch buffers that have not lived long enough, or are
279 279 * not from the spa we're trying to evict from.
280 280 */
281 281 kstat_named_t arcstat_evict_skip;
282 282 kstat_named_t arcstat_evict_l2_cached;
283 283 kstat_named_t arcstat_evict_l2_eligible;
284 284 kstat_named_t arcstat_evict_l2_ineligible;
285 285 kstat_named_t arcstat_hash_elements;
286 286 kstat_named_t arcstat_hash_elements_max;
287 287 kstat_named_t arcstat_hash_collisions;
288 288 kstat_named_t arcstat_hash_chains;
289 289 kstat_named_t arcstat_hash_chain_max;
290 290 kstat_named_t arcstat_p;
291 291 kstat_named_t arcstat_c;
292 292 kstat_named_t arcstat_c_min;
293 293 kstat_named_t arcstat_c_max;
294 294 kstat_named_t arcstat_size;
295 295 kstat_named_t arcstat_hdr_size;
296 296 kstat_named_t arcstat_data_size;
297 297 kstat_named_t arcstat_other_size;
298 298 kstat_named_t arcstat_l2_hits;
299 299 kstat_named_t arcstat_l2_misses;
300 300 kstat_named_t arcstat_l2_feeds;
301 301 kstat_named_t arcstat_l2_rw_clash;
302 302 kstat_named_t arcstat_l2_read_bytes;
303 303 kstat_named_t arcstat_l2_write_bytes;
304 304 kstat_named_t arcstat_l2_writes_sent;
305 305 kstat_named_t arcstat_l2_writes_done;
306 306 kstat_named_t arcstat_l2_writes_error;
307 307 kstat_named_t arcstat_l2_writes_hdr_miss;
308 308 kstat_named_t arcstat_l2_evict_lock_retry;
309 309 kstat_named_t arcstat_l2_evict_reading;
310 310 kstat_named_t arcstat_l2_free_on_write;
311 311 kstat_named_t arcstat_l2_abort_lowmem;
312 312 kstat_named_t arcstat_l2_cksum_bad;
313 313 kstat_named_t arcstat_l2_io_error;
314 314 kstat_named_t arcstat_l2_size;
315 315 kstat_named_t arcstat_l2_asize;
316 316 kstat_named_t arcstat_l2_hdr_size;
317 317 kstat_named_t arcstat_l2_compress_successes;
318 318 kstat_named_t arcstat_l2_compress_zeros;
319 319 kstat_named_t arcstat_l2_compress_failures;
320 320 kstat_named_t arcstat_memory_throttle_count;
321 321 kstat_named_t arcstat_duplicate_buffers;
322 322 kstat_named_t arcstat_duplicate_buffers_size;
323 323 kstat_named_t arcstat_duplicate_reads;
324 324 kstat_named_t arcstat_meta_used;
325 325 kstat_named_t arcstat_meta_limit;
326 326 kstat_named_t arcstat_meta_max;
327 327 } arc_stats_t;
328 328
329 329 static arc_stats_t arc_stats = {
330 330 { "hits", KSTAT_DATA_UINT64 },
331 331 { "misses", KSTAT_DATA_UINT64 },
332 332 { "demand_data_hits", KSTAT_DATA_UINT64 },
333 333 { "demand_data_misses", KSTAT_DATA_UINT64 },
334 334 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
335 335 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
336 336 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
337 337 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
338 338 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
339 339 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
340 340 { "mru_hits", KSTAT_DATA_UINT64 },
341 341 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
342 342 { "mfu_hits", KSTAT_DATA_UINT64 },
343 343 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
344 344 { "deleted", KSTAT_DATA_UINT64 },
345 345 { "recycle_miss", KSTAT_DATA_UINT64 },
346 346 { "mutex_miss", KSTAT_DATA_UINT64 },
347 347 { "evict_skip", KSTAT_DATA_UINT64 },
348 348 { "evict_l2_cached", KSTAT_DATA_UINT64 },
349 349 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
350 350 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
351 351 { "hash_elements", KSTAT_DATA_UINT64 },
352 352 { "hash_elements_max", KSTAT_DATA_UINT64 },
353 353 { "hash_collisions", KSTAT_DATA_UINT64 },
354 354 { "hash_chains", KSTAT_DATA_UINT64 },
355 355 { "hash_chain_max", KSTAT_DATA_UINT64 },
356 356 { "p", KSTAT_DATA_UINT64 },
357 357 { "c", KSTAT_DATA_UINT64 },
358 358 { "c_min", KSTAT_DATA_UINT64 },
359 359 { "c_max", KSTAT_DATA_UINT64 },
360 360 { "size", KSTAT_DATA_UINT64 },
361 361 { "hdr_size", KSTAT_DATA_UINT64 },
362 362 { "data_size", KSTAT_DATA_UINT64 },
363 363 { "other_size", KSTAT_DATA_UINT64 },
364 364 { "l2_hits", KSTAT_DATA_UINT64 },
365 365 { "l2_misses", KSTAT_DATA_UINT64 },
366 366 { "l2_feeds", KSTAT_DATA_UINT64 },
367 367 { "l2_rw_clash", KSTAT_DATA_UINT64 },
368 368 { "l2_read_bytes", KSTAT_DATA_UINT64 },
369 369 { "l2_write_bytes", KSTAT_DATA_UINT64 },
370 370 { "l2_writes_sent", KSTAT_DATA_UINT64 },
371 371 { "l2_writes_done", KSTAT_DATA_UINT64 },
372 372 { "l2_writes_error", KSTAT_DATA_UINT64 },
373 373 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
374 374 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
375 375 { "l2_evict_reading", KSTAT_DATA_UINT64 },
376 376 { "l2_free_on_write", KSTAT_DATA_UINT64 },
377 377 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
378 378 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
379 379 { "l2_io_error", KSTAT_DATA_UINT64 },
380 380 { "l2_size", KSTAT_DATA_UINT64 },
381 381 { "l2_asize", KSTAT_DATA_UINT64 },
382 382 { "l2_hdr_size", KSTAT_DATA_UINT64 },
383 383 { "l2_compress_successes", KSTAT_DATA_UINT64 },
384 384 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
385 385 { "l2_compress_failures", KSTAT_DATA_UINT64 },
386 386 { "memory_throttle_count", KSTAT_DATA_UINT64 },
387 387 { "duplicate_buffers", KSTAT_DATA_UINT64 },
388 388 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
389 389 { "duplicate_reads", KSTAT_DATA_UINT64 },
390 390 { "arc_meta_used", KSTAT_DATA_UINT64 },
391 391 { "arc_meta_limit", KSTAT_DATA_UINT64 },
392 392 { "arc_meta_max", KSTAT_DATA_UINT64 }
393 393 };
394 394
395 395 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
396 396
397 397 #define ARCSTAT_INCR(stat, val) \
398 398 atomic_add_64(&arc_stats.stat.value.ui64, (val))
399 399
400 400 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
401 401 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
402 402
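/*
 * Editor's note: ARCSTAT_MAX() below is a lock-free maximum update.  It
 * re-reads the current value and retries the compare-and-swap until either
 * the stat is already >= val or the swap successfully installs val.
 */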
403 403 #define ARCSTAT_MAX(stat, val) { \
404 404 uint64_t m; \
405 405 while ((val) > (m = arc_stats.stat.value.ui64) && \
406 406 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
407 407 continue; \
408 408 }
409 409
410 410 #define ARCSTAT_MAXSTAT(stat) \
411 411 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
412 412
413 413 /*
414 414 * We define a macro to allow ARC hits/misses to be easily broken down by
415 415 * two separate conditions, giving a total of four different subtypes for
416 416 * each of hits and misses (so eight statistics total).
417 417 */
418 418 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
419 419 if (cond1) { \
420 420 if (cond2) { \
421 421 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
422 422 } else { \
423 423 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
424 424 } \
425 425 } else { \
426 426 if (cond2) { \
427 427 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
428 428 } else { \
429 429 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
430 430 } \
431 431 }
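
To see how ARCSTAT_CONDSTAT() expands in practice, the hit-path call that appears later in this file,

	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);

bumps exactly one of arcstat_demand_data_hits, arcstat_demand_metadata_hits, arcstat_prefetch_data_hits, or arcstat_prefetch_metadata_hits, depending on whether the access was a demand or prefetch read of data or metadata.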
432 432
433 433 kstat_t *arc_ksp;
434 434 static arc_state_t *arc_anon;
435 435 static arc_state_t *arc_mru;
436 436 static arc_state_t *arc_mru_ghost;
437 437 static arc_state_t *arc_mfu;
438 438 static arc_state_t *arc_mfu_ghost;
439 439 static arc_state_t *arc_l2c_only;
440 440
441 441 /*
442 442 * There are several ARC variables that are critical to export as kstats --
443 443 * but we don't want to have to grovel around in the kstat whenever we wish to
444 444 * manipulate them. For these variables, we therefore define them to be in
445 445 * terms of the statistic variable. This assures that we are not introducing
446 446 * the possibility of inconsistency by having shadow copies of the variables,
447 447 * while still allowing the code to be readable.
448 448 */
449 449 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
450 450 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
451 451 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
452 452 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
453 453 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
454 454 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
455 455 #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */
456 456 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
457 457
458 458 #define L2ARC_IS_VALID_COMPRESS(_c_) \
459 459 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
460 460
461 461 static int arc_no_grow; /* Don't try to grow cache size */
462 462 static uint64_t arc_tempreserve;
463 463 static uint64_t arc_loaned_bytes;
464 464
465 465 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
466 466
467 467 typedef struct arc_callback arc_callback_t;
468 468
469 469 struct arc_callback {
470 470 void *acb_private;
471 471 arc_done_func_t *acb_done;
472 472 arc_buf_t *acb_buf;
473 473 zio_t *acb_zio_dummy;
474 474 arc_callback_t *acb_next;
475 475 };
476 476
477 477 typedef struct arc_write_callback arc_write_callback_t;
478 478
479 479 struct arc_write_callback {
480 480 void *awcb_private;
481 481 arc_done_func_t *awcb_ready;
482 482 arc_done_func_t *awcb_physdone;
483 483 arc_done_func_t *awcb_done;
484 484 arc_buf_t *awcb_buf;
485 485 };
486 486
487 487 struct arc_buf_hdr {
488 488 /* protected by hash lock */
489 489 dva_t b_dva;
490 490 uint64_t b_birth;
491 491 uint64_t b_cksum0;
492 492
493 493 kmutex_t b_freeze_lock;
494 494 zio_cksum_t *b_freeze_cksum;
495 495 void *b_thawed;
496 496
497 497 arc_buf_hdr_t *b_hash_next;
498 498 arc_buf_t *b_buf;
499 499 uint32_t b_flags;
500 500 uint32_t b_datacnt;
501 501
502 502 arc_callback_t *b_acb;
503 503 kcondvar_t b_cv;
504 504
505 505 /* immutable */
506 506 arc_buf_contents_t b_type;
507 507 uint64_t b_size;
508 508 uint64_t b_spa;
509 509
510 510 /* protected by arc state mutex */
511 511 arc_state_t *b_state;
512 512 list_node_t b_arc_node;
513 513
514 514 /* updated atomically */
515 515 clock_t b_arc_access;
516 516
517 517 /* self protecting */
518 518 refcount_t b_refcnt;
519 519
520 520 l2arc_buf_hdr_t *b_l2hdr;
521 521 list_node_t b_l2node;
522 522 };
523 523
524 524 static arc_buf_t *arc_eviction_list;
525 525 static kmutex_t arc_eviction_mtx;
526 526 static arc_buf_hdr_t arc_eviction_hdr;
527 527 static void arc_get_data_buf(arc_buf_t *buf);
528 528 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
529 529 static int arc_evict_needed(arc_buf_contents_t type);
530 530 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
531 531 static void arc_buf_watch(arc_buf_t *buf);
532 532
533 533 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
534 534
535 535 #define GHOST_STATE(state) \
536 536 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
537 537 (state) == arc_l2c_only)
538 538
539 539 /*
540 540 * Private ARC flags. These flags are private ARC only flags that will show up
541 541 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
542 542 * be passed in as arc_flags in things like arc_read. However, these flags
543 543 * should never be passed and should only be set by ARC code. When adding new
544 544 * public flags, make sure not to smash the private ones.
545 545 */
546 546
547 547 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
548 548 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
549 549 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
550 550 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
551 551 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
552 552 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */
553 553 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
554 554 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
555 555 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
556 556 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
557 557
558 558 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
559 559 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
560 560 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
561 561 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
562 562 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
563 563 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
564 564 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
565 565 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
566 566 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
567 567 (hdr)->b_l2hdr != NULL)
568 568 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
569 569 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
570 570 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
571 571
572 572 /*
573 573 * Other sizes
574 574 */
575 575
576 576 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
577 577 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
578 578
579 579 /*
580 580 * Hash table routines
581 581 */
582 582
583 583 #define HT_LOCK_PAD 64
584 584
585 585 struct ht_lock {
586 586 kmutex_t ht_lock;
587 587 #ifdef _KERNEL
588 588 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
589 589 #endif
590 590 };
591 591
592 592 #define BUF_LOCKS 256
593 593 typedef struct buf_hash_table {
594 594 uint64_t ht_mask;
595 595 arc_buf_hdr_t **ht_table;
596 596 struct ht_lock ht_locks[BUF_LOCKS];
597 597 } buf_hash_table_t;
598 598
599 599 static buf_hash_table_t buf_hash_table;
600 600
601 601 #define BUF_HASH_INDEX(spa, dva, birth) \
602 602 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
603 603 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
604 604 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
605 605 #define HDR_LOCK(hdr) \
606 606 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
607 607
608 608 uint64_t zfs_crc64_table[256];
609 609
610 610 /*
611 611 * Level 2 ARC
612 612 */
613 613
614 614 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
615 615 #define L2ARC_HEADROOM 2 /* num of writes */
616 616 /*
617 617 * If we discover during ARC scan any buffers to be compressed, we boost
618 618 * our headroom for the next scanning cycle by this percentage multiple.
619 619 */
620 620 #define L2ARC_HEADROOM_BOOST 200
621 621 #define L2ARC_FEED_SECS 1 /* caching interval secs */
622 622 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
623 623
624 624 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
625 625 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
626 626
627 627 /* L2ARC Performance Tunables */
628 628 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
629 629 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
630 630 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
631 631 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
632 632 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
633 633 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
634 634 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
635 635 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
636 636 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
637 637
638 638 /*
639 639 * L2ARC Internals
640 640 */
641 641 typedef struct l2arc_dev {
642 642 vdev_t *l2ad_vdev; /* vdev */
643 643 spa_t *l2ad_spa; /* spa */
644 644 uint64_t l2ad_hand; /* next write location */
645 645 uint64_t l2ad_start; /* first addr on device */
646 646 uint64_t l2ad_end; /* last addr on device */
647 647 uint64_t l2ad_evict; /* last addr eviction reached */
648 648 boolean_t l2ad_first; /* first sweep through */
649 649 boolean_t l2ad_writing; /* currently writing */
650 650 list_t *l2ad_buflist; /* buffer list */
651 651 list_node_t l2ad_node; /* device list node */
652 652 } l2arc_dev_t;
653 653
654 654 static list_t L2ARC_dev_list; /* device list */
655 655 static list_t *l2arc_dev_list; /* device list pointer */
656 656 static kmutex_t l2arc_dev_mtx; /* device list mutex */
657 657 static l2arc_dev_t *l2arc_dev_last; /* last device used */
658 658 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
659 659 static list_t L2ARC_free_on_write; /* free after write buf list */
660 660 static list_t *l2arc_free_on_write; /* free after write list ptr */
661 661 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
662 662 static uint64_t l2arc_ndev; /* number of devices */
663 663
664 664 typedef struct l2arc_read_callback {
665 665 arc_buf_t *l2rcb_buf; /* read buffer */
666 666 spa_t *l2rcb_spa; /* spa */
667 667 blkptr_t l2rcb_bp; /* original blkptr */
668 668 zbookmark_phys_t l2rcb_zb; /* original bookmark */
669 669 int l2rcb_flags; /* original flags */
670 670 enum zio_compress l2rcb_compress; /* applied compress */
671 671 } l2arc_read_callback_t;
672 672
673 673 typedef struct l2arc_write_callback {
674 674 l2arc_dev_t *l2wcb_dev; /* device info */
675 675 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
676 676 } l2arc_write_callback_t;
677 677
678 678 struct l2arc_buf_hdr {
679 679 /* protected by arc_buf_hdr mutex */
680 680 l2arc_dev_t *b_dev; /* L2ARC device */
681 681 uint64_t b_daddr; /* disk address, offset byte */
682 682 /* compression applied to buffer data */
683 683 enum zio_compress b_compress;
684 684 /* real alloc'd buffer size depending on b_compress applied */
685 685 int b_asize;
686 686 /* temporary buffer holder for in-flight compressed data */
687 687 void *b_tmp_cdata;
688 688 };
689 689
690 690 typedef struct l2arc_data_free {
691 691 /* protected by l2arc_free_on_write_mtx */
692 692 void *l2df_data;
693 693 size_t l2df_size;
694 694 void (*l2df_func)(void *, size_t);
695 695 list_node_t l2df_list_node;
696 696 } l2arc_data_free_t;
697 697
698 698 static kmutex_t l2arc_feed_thr_lock;
699 699 static kcondvar_t l2arc_feed_thr_cv;
700 700 static uint8_t l2arc_thread_exit;
701 701
702 702 static void l2arc_read_done(zio_t *zio);
703 703 static void l2arc_hdr_stat_add(void);
704 704 static void l2arc_hdr_stat_remove(void);
705 705
706 706 static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
707 707 static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
708 708 enum zio_compress c);
709 709 static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
710 710
711 711 static uint64_t
712 712 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
713 713 {
714 714 uint8_t *vdva = (uint8_t *)dva;
715 715 uint64_t crc = -1ULL;
716 716 int i;
717 717
718 718 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
719 719
720 720 for (i = 0; i < sizeof (dva_t); i++)
721 721 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
722 722
723 723 crc ^= (spa>>8) ^ birth;
724 724
725 725 return (crc);
726 726 }
727 727
728 728 #define BUF_EMPTY(buf) \
729 729 ((buf)->b_dva.dva_word[0] == 0 && \
730 730 (buf)->b_dva.dva_word[1] == 0 && \
731 731 (buf)->b_cksum0 == 0)
732 732
733 733 #define BUF_EQUAL(spa, dva, birth, buf) \
734 734 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
735 735 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
736 736 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
737 737
738 738 static void
739 739 buf_discard_identity(arc_buf_hdr_t *hdr)
740 740 {
741 741 hdr->b_dva.dva_word[0] = 0;
742 742 hdr->b_dva.dva_word[1] = 0;
743 743 hdr->b_birth = 0;
744 744 hdr->b_cksum0 = 0;
745 745 }
746 746
747 747 static arc_buf_hdr_t *
748 748 buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
749 749 {
750 750 const dva_t *dva = BP_IDENTITY(bp);
751 751 uint64_t birth = BP_PHYSICAL_BIRTH(bp);
752 752 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
753 753 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
754 754 arc_buf_hdr_t *buf;
755 755
756 756 mutex_enter(hash_lock);
757 757 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
758 758 buf = buf->b_hash_next) {
759 759 if (BUF_EQUAL(spa, dva, birth, buf)) {
760 760 *lockp = hash_lock;
761 761 return (buf);
762 762 }
763 763 }
764 764 mutex_exit(hash_lock);
765 765 *lockp = NULL;
766 766 return (NULL);
767 767 }
768 768
769 769 /*
770 770 * Insert an entry into the hash table. If there is already an element
771 771 * equal to elem in the hash table, then the already existing element
772 772 * will be returned and the new element will not be inserted.
773 773 * Otherwise returns NULL.
774 774 */
775 775 static arc_buf_hdr_t *
776 776 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
777 777 {
778 778 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
779 779 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
780 780 arc_buf_hdr_t *fbuf;
781 781 uint32_t i;
782 782
783 783 ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
784 784 ASSERT(buf->b_birth != 0);
785 785 ASSERT(!HDR_IN_HASH_TABLE(buf));
786 786 *lockp = hash_lock;
787 787 mutex_enter(hash_lock);
788 788 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
789 789 fbuf = fbuf->b_hash_next, i++) {
790 790 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
791 791 return (fbuf);
792 792 }
793 793
794 794 buf->b_hash_next = buf_hash_table.ht_table[idx];
795 795 buf_hash_table.ht_table[idx] = buf;
796 796 buf->b_flags |= ARC_IN_HASH_TABLE;
797 797
798 798 /* collect some hash table performance data */
799 799 if (i > 0) {
800 800 ARCSTAT_BUMP(arcstat_hash_collisions);
801 801 if (i == 1)
802 802 ARCSTAT_BUMP(arcstat_hash_chains);
803 803
804 804 ARCSTAT_MAX(arcstat_hash_chain_max, i);
805 805 }
806 806
807 807 ARCSTAT_BUMP(arcstat_hash_elements);
808 808 ARCSTAT_MAXSTAT(arcstat_hash_elements);
809 809
810 810 return (NULL);
811 811 }
812 812
813 813 static void
814 814 buf_hash_remove(arc_buf_hdr_t *buf)
815 815 {
816 816 arc_buf_hdr_t *fbuf, **bufp;
817 817 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
818 818
819 819 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
820 820 ASSERT(HDR_IN_HASH_TABLE(buf));
821 821
822 822 bufp = &buf_hash_table.ht_table[idx];
823 823 while ((fbuf = *bufp) != buf) {
824 824 ASSERT(fbuf != NULL);
825 825 bufp = &fbuf->b_hash_next;
826 826 }
827 827 *bufp = buf->b_hash_next;
828 828 buf->b_hash_next = NULL;
829 829 buf->b_flags &= ~ARC_IN_HASH_TABLE;
830 830
831 831 /* collect some hash table performance data */
832 832 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
833 833
834 834 if (buf_hash_table.ht_table[idx] &&
835 835 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
836 836 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
837 837 }
838 838
839 839 /*
840 840 * Global data structures and functions for the buf kmem cache.
841 841 */
842 842 static kmem_cache_t *hdr_cache;
843 843 static kmem_cache_t *buf_cache;
844 844
845 845 static void
846 846 buf_fini(void)
847 847 {
848 848 int i;
849 849
850 850 kmem_free(buf_hash_table.ht_table,
851 851 (buf_hash_table.ht_mask + 1) * sizeof (void *));
852 852 for (i = 0; i < BUF_LOCKS; i++)
853 853 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
854 854 kmem_cache_destroy(hdr_cache);
855 855 kmem_cache_destroy(buf_cache);
856 856 }
857 857
858 858 /*
859 859 * Constructor callback - called when the cache is empty
860 860 * and a new buf is requested.
861 861 */
862 862 /* ARGSUSED */
863 863 static int
864 864 hdr_cons(void *vbuf, void *unused, int kmflag)
865 865 {
866 866 arc_buf_hdr_t *buf = vbuf;
867 867
868 868 bzero(buf, sizeof (arc_buf_hdr_t));
869 869 refcount_create(&buf->b_refcnt);
870 870 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
871 871 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
872 872 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
873 873
874 874 return (0);
875 875 }
876 876
877 877 /* ARGSUSED */
878 878 static int
879 879 buf_cons(void *vbuf, void *unused, int kmflag)
880 880 {
881 881 arc_buf_t *buf = vbuf;
882 882
883 883 bzero(buf, sizeof (arc_buf_t));
884 884 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
885 885 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
886 886
887 887 return (0);
888 888 }
889 889
890 890 /*
891 891 * Destructor callback - called when a cached buf is
892 892 * no longer required.
893 893 */
894 894 /* ARGSUSED */
895 895 static void
896 896 hdr_dest(void *vbuf, void *unused)
897 897 {
898 898 arc_buf_hdr_t *buf = vbuf;
899 899
900 900 ASSERT(BUF_EMPTY(buf));
901 901 refcount_destroy(&buf->b_refcnt);
902 902 cv_destroy(&buf->b_cv);
903 903 mutex_destroy(&buf->b_freeze_lock);
904 904 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
905 905 }
906 906
907 907 /* ARGSUSED */
908 908 static void
909 909 buf_dest(void *vbuf, void *unused)
910 910 {
911 911 arc_buf_t *buf = vbuf;
912 912
913 913 mutex_destroy(&buf->b_evict_lock);
914 914 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
915 915 }
916 916
917 917 /*
918 918 * Reclaim callback -- invoked when memory is low.
919 919 */
920 920 /* ARGSUSED */
921 921 static void
922 922 hdr_recl(void *unused)
923 923 {
924 924 dprintf("hdr_recl called\n");
925 925 /*
926 926 * umem calls the reclaim func when we destroy the buf cache,
927 927 * which is after we do arc_fini().
928 928 */
929 929 if (!arc_dead)
930 930 cv_signal(&arc_reclaim_thr_cv);
931 931 }
932 932
933 933 static void
934 934 buf_init(void)
935 935 {
936 936 uint64_t *ct;
937 937 uint64_t hsize = 1ULL << 12;
938 938 int i, j;
939 939
940 940 /*
941 941 * The hash table is big enough to fill all of physical memory
942 942 * with an average block size of zfs_arc_average_blocksize (default 8K).
943 943 * By default, the table will take up
944 944 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
945 945 */
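	/*
	 * Editor's worked example, assuming 16 GB of physical memory and the
	 * default zfs_arc_average_blocksize of 8K: the loop below stops at
	 * hsize = 2^21 (2M slots), so the table occupies 2M * 8 bytes =
	 * 16 MB -- roughly 1 MB of table per GB of memory, as noted above.
	 */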
946 946 while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
947 947 hsize <<= 1;
948 948 retry:
949 949 buf_hash_table.ht_mask = hsize - 1;
950 950 buf_hash_table.ht_table =
951 951 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
952 952 if (buf_hash_table.ht_table == NULL) {
953 953 ASSERT(hsize > (1ULL << 8));
954 954 hsize >>= 1;
955 955 goto retry;
956 956 }
957 957
958 958 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
959 959 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
960 960 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
961 961 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
962 962
963 963 for (i = 0; i < 256; i++)
964 964 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
965 965 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
966 966
967 967 for (i = 0; i < BUF_LOCKS; i++) {
968 968 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
969 969 NULL, MUTEX_DEFAULT, NULL);
970 970 }
971 971 }
972 972
973 973 #define ARC_MINTIME (hz>>4) /* 62 ms */
974 974
975 975 static void
976 976 arc_cksum_verify(arc_buf_t *buf)
977 977 {
978 978 zio_cksum_t zc;
979 979
980 980 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
981 981 return;
982 982
983 983 mutex_enter(&buf->b_hdr->b_freeze_lock);
984 984 if (buf->b_hdr->b_freeze_cksum == NULL ||
985 985 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
986 986 mutex_exit(&buf->b_hdr->b_freeze_lock);
987 987 return;
988 988 }
989 989 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
990 990 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
991 991 panic("buffer modified while frozen!");
992 992 mutex_exit(&buf->b_hdr->b_freeze_lock);
993 993 }
994 994
995 995 static int
996 996 arc_cksum_equal(arc_buf_t *buf)
997 997 {
998 998 zio_cksum_t zc;
999 999 int equal;
1000 1000
1001 1001 mutex_enter(&buf->b_hdr->b_freeze_lock);
1002 1002 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1003 1003 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1004 1004 mutex_exit(&buf->b_hdr->b_freeze_lock);
1005 1005
1006 1006 return (equal);
1007 1007 }
1008 1008
1009 1009 static void
1010 1010 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1011 1011 {
1012 1012 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1013 1013 return;
1014 1014
1015 1015 mutex_enter(&buf->b_hdr->b_freeze_lock);
1016 1016 if (buf->b_hdr->b_freeze_cksum != NULL) {
1017 1017 mutex_exit(&buf->b_hdr->b_freeze_lock);
1018 1018 return;
1019 1019 }
1020 1020 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1021 1021 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1022 1022 buf->b_hdr->b_freeze_cksum);
1023 1023 mutex_exit(&buf->b_hdr->b_freeze_lock);
1024 1024 arc_buf_watch(buf);
1025 1025 }
1026 1026
1027 1027 #ifndef _KERNEL
1028 1028 typedef struct procctl {
1029 1029 long cmd;
1030 1030 prwatch_t prwatch;
1031 1031 } procctl_t;
1032 1032 #endif
1033 1033
1034 1034 /* ARGSUSED */
1035 1035 static void
1036 1036 arc_buf_unwatch(arc_buf_t *buf)
1037 1037 {
1038 1038 #ifndef _KERNEL
1039 1039 if (arc_watch) {
1040 1040 int result;
1041 1041 procctl_t ctl;
1042 1042 ctl.cmd = PCWATCH;
1043 1043 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1044 1044 ctl.prwatch.pr_size = 0;
1045 1045 ctl.prwatch.pr_wflags = 0;
1046 1046 result = write(arc_procfd, &ctl, sizeof (ctl));
1047 1047 ASSERT3U(result, ==, sizeof (ctl));
1048 1048 }
1049 1049 #endif
1050 1050 }
1051 1051
1052 1052 /* ARGSUSED */
1053 1053 static void
1054 1054 arc_buf_watch(arc_buf_t *buf)
1055 1055 {
1056 1056 #ifndef _KERNEL
1057 1057 if (arc_watch) {
1058 1058 int result;
1059 1059 procctl_t ctl;
1060 1060 ctl.cmd = PCWATCH;
1061 1061 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1062 1062 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1063 1063 ctl.prwatch.pr_wflags = WA_WRITE;
1064 1064 result = write(arc_procfd, &ctl, sizeof (ctl));
1065 1065 ASSERT3U(result, ==, sizeof (ctl));
1066 1066 }
1067 1067 #endif
1068 1068 }
1069 1069
1070 1070 void
1071 1071 arc_buf_thaw(arc_buf_t *buf)
1072 1072 {
1073 1073 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1074 1074 if (buf->b_hdr->b_state != arc_anon)
1075 1075 panic("modifying non-anon buffer!");
1076 1076 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1077 1077 panic("modifying buffer while i/o in progress!");
1078 1078 arc_cksum_verify(buf);
1079 1079 }
1080 1080
1081 1081 mutex_enter(&buf->b_hdr->b_freeze_lock);
1082 1082 if (buf->b_hdr->b_freeze_cksum != NULL) {
1083 1083 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1084 1084 buf->b_hdr->b_freeze_cksum = NULL;
1085 1085 }
1086 1086
1087 1087 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1088 1088 if (buf->b_hdr->b_thawed)
1089 1089 kmem_free(buf->b_hdr->b_thawed, 1);
1090 1090 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1091 1091 }
1092 1092
1093 1093 mutex_exit(&buf->b_hdr->b_freeze_lock);
1094 1094
1095 1095 arc_buf_unwatch(buf);
1096 1096 }
1097 1097
1098 1098 void
1099 1099 arc_buf_freeze(arc_buf_t *buf)
1100 1100 {
1101 1101 kmutex_t *hash_lock;
1102 1102
1103 1103 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1104 1104 return;
1105 1105
1106 1106 hash_lock = HDR_LOCK(buf->b_hdr);
1107 1107 mutex_enter(hash_lock);
1108 1108
1109 1109 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1110 1110 buf->b_hdr->b_state == arc_anon);
1111 1111 arc_cksum_compute(buf, B_FALSE);
1112 1112 mutex_exit(hash_lock);
1113 1113
1114 1114 }
1115 1115
1116 1116 static void
1117 1117 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1118 1118 {
1119 1119 ASSERT(MUTEX_HELD(hash_lock));
1120 1120
1121 1121 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1122 1122 (ab->b_state != arc_anon)) {
1123 1123 uint64_t delta = ab->b_size * ab->b_datacnt;
1124 1124 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1125 1125 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1126 1126
1127 1127 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1128 1128 mutex_enter(&ab->b_state->arcs_mtx);
1129 1129 ASSERT(list_link_active(&ab->b_arc_node));
1130 1130 list_remove(list, ab);
1131 1131 if (GHOST_STATE(ab->b_state)) {
1132 1132 ASSERT0(ab->b_datacnt);
1133 1133 ASSERT3P(ab->b_buf, ==, NULL);
1134 1134 delta = ab->b_size;
1135 1135 }
1136 1136 ASSERT(delta > 0);
1137 1137 ASSERT3U(*size, >=, delta);
1138 1138 atomic_add_64(size, -delta);
1139 1139 mutex_exit(&ab->b_state->arcs_mtx);
1140 1140 /* remove the prefetch flag if we get a reference */
1141 1141 if (ab->b_flags & ARC_PREFETCH)
1142 1142 ab->b_flags &= ~ARC_PREFETCH;
1143 1143 }
1144 1144 }
1145 1145
1146 1146 static int
1147 1147 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1148 1148 {
1149 1149 int cnt;
1150 1150 arc_state_t *state = ab->b_state;
1151 1151
1152 1152 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1153 1153 ASSERT(!GHOST_STATE(state));
1154 1154
1155 1155 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1156 1156 (state != arc_anon)) {
1157 1157 uint64_t *size = &state->arcs_lsize[ab->b_type];
1158 1158
1159 1159 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1160 1160 mutex_enter(&state->arcs_mtx);
1161 1161 ASSERT(!list_link_active(&ab->b_arc_node));
1162 1162 list_insert_head(&state->arcs_list[ab->b_type], ab);
1163 1163 ASSERT(ab->b_datacnt > 0);
1164 1164 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1165 1165 mutex_exit(&state->arcs_mtx);
1166 1166 }
1167 1167 return (cnt);
1168 1168 }
1169 1169
1170 1170 /*
1171 1171 * Move the supplied buffer to the indicated state. The mutex
1172 1172 * for the buffer must be held by the caller.
1173 1173 */
1174 1174 static void
1175 1175 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1176 1176 {
1177 1177 arc_state_t *old_state = ab->b_state;
1178 1178 int64_t refcnt = refcount_count(&ab->b_refcnt);
1179 1179 uint64_t from_delta, to_delta;
1180 1180
1181 1181 ASSERT(MUTEX_HELD(hash_lock));
1182 1182 ASSERT3P(new_state, !=, old_state);
1183 1183 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1184 1184 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1185 1185 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1186 1186
1187 1187 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1188 1188
1189 1189 /*
1190 1190 * If this buffer is evictable, transfer it from the
1191 1191 * old state list to the new state list.
1192 1192 */
1193 1193 if (refcnt == 0) {
1194 1194 if (old_state != arc_anon) {
1195 1195 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1196 1196 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1197 1197
1198 1198 if (use_mutex)
1199 1199 mutex_enter(&old_state->arcs_mtx);
1200 1200
1201 1201 ASSERT(list_link_active(&ab->b_arc_node));
1202 1202 list_remove(&old_state->arcs_list[ab->b_type], ab);
1203 1203
1204 1204 /*
1205 1205 * If prefetching out of the ghost cache,
1206 1206 * we will have a non-zero datacnt.
1207 1207 */
1208 1208 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1209 1209 /* ghost elements have a ghost size */
1210 1210 ASSERT(ab->b_buf == NULL);
1211 1211 from_delta = ab->b_size;
1212 1212 }
1213 1213 ASSERT3U(*size, >=, from_delta);
1214 1214 atomic_add_64(size, -from_delta);
1215 1215
1216 1216 if (use_mutex)
1217 1217 mutex_exit(&old_state->arcs_mtx);
1218 1218 }
1219 1219 if (new_state != arc_anon) {
1220 1220 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1221 1221 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1222 1222
1223 1223 if (use_mutex)
1224 1224 mutex_enter(&new_state->arcs_mtx);
1225 1225
1226 1226 list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1227 1227
1228 1228 /* ghost elements have a ghost size */
1229 1229 if (GHOST_STATE(new_state)) {
1230 1230 ASSERT(ab->b_datacnt == 0);
1231 1231 ASSERT(ab->b_buf == NULL);
1232 1232 to_delta = ab->b_size;
1233 1233 }
1234 1234 atomic_add_64(size, to_delta);
1235 1235
1236 1236 if (use_mutex)
1237 1237 mutex_exit(&new_state->arcs_mtx);
1238 1238 }
1239 1239 }
1240 1240
1241 1241 ASSERT(!BUF_EMPTY(ab));
1242 1242 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1243 1243 buf_hash_remove(ab);
1244 1244
1245 1245 /* adjust state sizes */
1246 1246 if (to_delta)
1247 1247 atomic_add_64(&new_state->arcs_size, to_delta);
1248 1248 if (from_delta) {
1249 1249 ASSERT3U(old_state->arcs_size, >=, from_delta);
1250 1250 atomic_add_64(&old_state->arcs_size, -from_delta);
1251 1251 }
1252 1252 ab->b_state = new_state;
1253 1253
1254 1254 /* adjust l2arc hdr stats */
1255 1255 if (new_state == arc_l2c_only)
1256 1256 l2arc_hdr_stat_add();
1257 1257 else if (old_state == arc_l2c_only)
1258 1258 l2arc_hdr_stat_remove();
1259 1259 }
1260 1260
1261 1261 void
1262 1262 arc_space_consume(uint64_t space, arc_space_type_t type)
1263 1263 {
1264 1264 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1265 1265
1266 1266 switch (type) {
1267 1267 case ARC_SPACE_DATA:
1268 1268 ARCSTAT_INCR(arcstat_data_size, space);
1269 1269 break;
1270 1270 case ARC_SPACE_OTHER:
1271 1271 ARCSTAT_INCR(arcstat_other_size, space);
1272 1272 break;
1273 1273 case ARC_SPACE_HDRS:
1274 1274 ARCSTAT_INCR(arcstat_hdr_size, space);
1275 1275 break;
1276 1276 case ARC_SPACE_L2HDRS:
1277 1277 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1278 1278 break;
1279 1279 }
1280 1280
1281 1281 ARCSTAT_INCR(arcstat_meta_used, space);
1282 1282 atomic_add_64(&arc_size, space);
1283 1283 }
1284 1284
1285 1285 void
1286 1286 arc_space_return(uint64_t space, arc_space_type_t type)
1287 1287 {
1288 1288 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1289 1289
1290 1290 switch (type) {
1291 1291 case ARC_SPACE_DATA:
1292 1292 ARCSTAT_INCR(arcstat_data_size, -space);
1293 1293 break;
1294 1294 case ARC_SPACE_OTHER:
1295 1295 ARCSTAT_INCR(arcstat_other_size, -space);
1296 1296 break;
1297 1297 case ARC_SPACE_HDRS:
1298 1298 ARCSTAT_INCR(arcstat_hdr_size, -space);
1299 1299 break;
1300 1300 case ARC_SPACE_L2HDRS:
1301 1301 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1302 1302 break;
1303 1303 }
1304 1304
1305 1305 ASSERT(arc_meta_used >= space);
1306 1306 if (arc_meta_max < arc_meta_used)
1307 1307 arc_meta_max = arc_meta_used;
1308 1308 ARCSTAT_INCR(arcstat_meta_used, -space);
1309 1309 ASSERT(arc_size >= space);
1310 1310 atomic_add_64(&arc_size, -space);
1311 1311 }
1312 1312
1313 1313 void *
1314 1314 arc_data_buf_alloc(uint64_t size)
1315 1315 {
1316 1316 if (arc_evict_needed(ARC_BUFC_DATA))
1317 1317 cv_signal(&arc_reclaim_thr_cv);
1318 1318 atomic_add_64(&arc_size, size);
1319 1319 return (zio_data_buf_alloc(size));
1320 1320 }
1321 1321
1322 1322 void
1323 1323 arc_data_buf_free(void *buf, uint64_t size)
1324 1324 {
1325 1325 zio_data_buf_free(buf, size);
1326 1326 ASSERT(arc_size >= size);
1327 1327 atomic_add_64(&arc_size, -size);
1328 1328 }
1329 1329
1330 1330 arc_buf_t *
1331 1331 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1332 1332 {
1333 1333 arc_buf_hdr_t *hdr;
1334 1334 arc_buf_t *buf;
1335 1335
1336 1336 ASSERT3U(size, >, 0);
1337 1337 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1338 1338 ASSERT(BUF_EMPTY(hdr));
1339 1339 hdr->b_size = size;
1340 1340 hdr->b_type = type;
1341 1341 hdr->b_spa = spa_load_guid(spa);
1342 1342 hdr->b_state = arc_anon;
1343 1343 hdr->b_arc_access = 0;
1344 1344 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1345 1345 buf->b_hdr = hdr;
1346 1346 buf->b_data = NULL;
1347 1347 buf->b_efunc = NULL;
1348 1348 buf->b_private = NULL;
1349 1349 buf->b_next = NULL;
1350 1350 hdr->b_buf = buf;
1351 1351 arc_get_data_buf(buf);
1352 1352 hdr->b_datacnt = 1;
1353 1353 hdr->b_flags = 0;
1354 1354 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1355 1355 (void) refcount_add(&hdr->b_refcnt, tag);
1356 1356
1357 1357 return (buf);
1358 1358 }
1359 1359
1360 1360 static char *arc_onloan_tag = "onloan";
1361 1361
1362 1362 /*
1363 1363 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1364 1364 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1365 1365 * buffers must be returned to the arc before they can be used by the DMU or
1366 1366 * freed.
1367 1367 */
1368 1368 arc_buf_t *
1369 1369 arc_loan_buf(spa_t *spa, int size)
1370 1370 {
1371 1371 arc_buf_t *buf;
1372 1372
1373 1373 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1374 1374
1375 1375 atomic_add_64(&arc_loaned_bytes, size);
1376 1376 return (buf);
1377 1377 }
1378 1378
1379 1379 /*
1380 1380 * Return a loaned arc buffer to the arc.
1381 1381 */
1382 1382 void
1383 1383 arc_return_buf(arc_buf_t *buf, void *tag)
1384 1384 {
1385 1385 arc_buf_hdr_t *hdr = buf->b_hdr;
1386 1386
1387 1387 ASSERT(buf->b_data != NULL);
1388 1388 (void) refcount_add(&hdr->b_refcnt, tag);
1389 1389 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1390 1390
1391 1391 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1392 1392 }
1393 1393
1394 1394 /* Detach an arc_buf from a dbuf (tag) */
1395 1395 void
1396 1396 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1397 1397 {
1398 1398 arc_buf_hdr_t *hdr;
1399 1399
1400 1400 ASSERT(buf->b_data != NULL);
1401 1401 hdr = buf->b_hdr;
1402 1402 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1403 1403 (void) refcount_remove(&hdr->b_refcnt, tag);
1404 1404 buf->b_efunc = NULL;
1405 1405 buf->b_private = NULL;
1406 1406
1407 1407 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1408 1408 }
1409 1409
1410 1410 static arc_buf_t *
1411 1411 arc_buf_clone(arc_buf_t *from)
1412 1412 {
1413 1413 arc_buf_t *buf;
1414 1414 arc_buf_hdr_t *hdr = from->b_hdr;
1415 1415 uint64_t size = hdr->b_size;
1416 1416
1417 1417 ASSERT(hdr->b_state != arc_anon);
1418 1418
1419 1419 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1420 1420 buf->b_hdr = hdr;
1421 1421 buf->b_data = NULL;
1422 1422 buf->b_efunc = NULL;
1423 1423 buf->b_private = NULL;
1424 1424 buf->b_next = hdr->b_buf;
1425 1425 hdr->b_buf = buf;
1426 1426 arc_get_data_buf(buf);
1427 1427 bcopy(from->b_data, buf->b_data, size);
1428 1428
1429 1429 /*
1430 1430 * This buffer already exists in the arc so create a duplicate
1431 1431 * copy for the caller. If the buffer is associated with user data
1432 1432 * then track the size and number of duplicates. These stats will be
1433 1433 * updated as duplicate buffers are created and destroyed.
1434 1434 */
1435 1435 if (hdr->b_type == ARC_BUFC_DATA) {
1436 1436 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1437 1437 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1438 1438 }
1439 1439 hdr->b_datacnt += 1;
1440 1440 return (buf);
1441 1441 }
1442 1442
1443 1443 void
1444 1444 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1445 1445 {
1446 1446 arc_buf_hdr_t *hdr;
1447 1447 kmutex_t *hash_lock;
1448 1448
1449 1449 /*
1450 1450 * Check to see if this buffer is evicted. Callers
1451 1451 * must verify b_data != NULL to know if the add_ref
1452 1452 * was successful.
1453 1453 */
1454 1454 mutex_enter(&buf->b_evict_lock);
1455 1455 if (buf->b_data == NULL) {
1456 1456 mutex_exit(&buf->b_evict_lock);
1457 1457 return;
1458 1458 }
1459 1459 hash_lock = HDR_LOCK(buf->b_hdr);
1460 1460 mutex_enter(hash_lock);
1461 1461 hdr = buf->b_hdr;
1462 1462 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1463 1463 mutex_exit(&buf->b_evict_lock);
1464 1464
1465 1465 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1466 1466 add_reference(hdr, hash_lock, tag);
1467 1467 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1468 1468 arc_access(hdr, hash_lock);
1469 1469 mutex_exit(hash_lock);
1470 1470 ARCSTAT_BUMP(arcstat_hits);
1471 1471 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1472 1472 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1473 1473 data, metadata, hits);
1474 1474 }
1475 1475
1476 1476 /*
1477 1477 * Free the arc data buffer. If it is an l2arc write in progress,
1478 1478 * the buffer is placed on l2arc_free_on_write to be freed later.
1479 1479 */
1480 1480 static void
1481 1481 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1482 1482 {
1483 1483 arc_buf_hdr_t *hdr = buf->b_hdr;
1484 1484
1485 1485 if (HDR_L2_WRITING(hdr)) {
1486 1486 l2arc_data_free_t *df;
1487 1487 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1488 1488 df->l2df_data = buf->b_data;
1489 1489 df->l2df_size = hdr->b_size;
1490 1490 df->l2df_func = free_func;
1491 1491 mutex_enter(&l2arc_free_on_write_mtx);
1492 1492 list_insert_head(l2arc_free_on_write, df);
1493 1493 mutex_exit(&l2arc_free_on_write_mtx);
1494 1494 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1495 1495 } else {
1496 1496 free_func(buf->b_data, hdr->b_size);
1497 1497 }
1498 1498 }
1499 1499
1500 1500 /*
1501 1501 * Free up buf->b_data and if 'remove' is set, then pull the
1502 1502 * arc_buf_t off of the arc_buf_hdr_t's list and free it.
1503 1503 */
1504 1504 static void
1505 1505 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove)
1506 1506 {
1507 1507 arc_buf_t **bufp;
1508 1508
1509 1509 /* free up data associated with the buf */
1510 1510 if (buf->b_data) {
1511 1511 arc_state_t *state = buf->b_hdr->b_state;
1512 1512 uint64_t size = buf->b_hdr->b_size;
1513 1513 arc_buf_contents_t type = buf->b_hdr->b_type;
1514 1514
1515 1515 arc_cksum_verify(buf);
1516 1516 arc_buf_unwatch(buf);
1517 1517
1518 1518 if (!recycle) {
1519 1519 if (type == ARC_BUFC_METADATA) {
1520 1520 arc_buf_data_free(buf, zio_buf_free);
1521 1521 arc_space_return(size, ARC_SPACE_DATA);
1522 1522 } else {
1523 1523 ASSERT(type == ARC_BUFC_DATA);
1524 1524 arc_buf_data_free(buf, zio_data_buf_free);
1525 1525 ARCSTAT_INCR(arcstat_data_size, -size);
1526 1526 atomic_add_64(&arc_size, -size);
1527 1527 }
1528 1528 }
1529 1529 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1530 1530 uint64_t *cnt = &state->arcs_lsize[type];
1531 1531
1532 1532 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1533 1533 ASSERT(state != arc_anon);
1534 1534
1535 1535 ASSERT3U(*cnt, >=, size);
1536 1536 atomic_add_64(cnt, -size);
1537 1537 }
1538 1538 ASSERT3U(state->arcs_size, >=, size);
1539 1539 atomic_add_64(&state->arcs_size, -size);
1540 1540 buf->b_data = NULL;
1541 1541
1542 1542 /*
1543 1543 * If we're destroying a duplicate buffer make sure
1544 1544 * that the appropriate statistics are updated.
1545 1545 */
1546 1546 if (buf->b_hdr->b_datacnt > 1 &&
1547 1547 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1548 1548 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1549 1549 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1550 1550 }
1551 1551 ASSERT(buf->b_hdr->b_datacnt > 0);
1552 1552 buf->b_hdr->b_datacnt -= 1;
1553 1553 }
1554 1554
1555 1555 /* only remove the buf if requested */
1556 1556 if (!remove)
1557 1557 return;
1558 1558
1559 1559 /* remove the buf from the hdr list */
1560 1560 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1561 1561 continue;
1562 1562 *bufp = buf->b_next;
1563 1563 buf->b_next = NULL;
1564 1564
1565 1565 ASSERT(buf->b_efunc == NULL);
1566 1566
1567 1567 /* clean up the buf */
1568 1568 buf->b_hdr = NULL;
1569 1569 kmem_cache_free(buf_cache, buf);
1570 1570 }
1571 1571
1572 1572 static void
1573 1573 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1574 1574 {
1575 1575 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1576 1576 ASSERT3P(hdr->b_state, ==, arc_anon);
1577 1577 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1578 1578 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1579 1579
1580 1580 if (l2hdr != NULL) {
1581 1581 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1582 1582 /*
1583 1583 * To prevent arc_free() and l2arc_evict() from
1584 1584 * attempting to free the same buffer at the same time,
1585 1585 * a FREE_IN_PROGRESS flag is given to arc_free() to
1586 1586 * give it priority. l2arc_evict() can't destroy this
1587 1587 * header while we are waiting on l2arc_buflist_mtx.
1588 1588 *
1589 1589 * The hdr may be removed from l2ad_buflist before we
1590 1590 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1591 1591 */
1592 1592 if (!buflist_held) {
1593 1593 mutex_enter(&l2arc_buflist_mtx);
1594 1594 l2hdr = hdr->b_l2hdr;
1595 1595 }
1596 1596
1597 1597 if (l2hdr != NULL) {
1598 1598 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1599 1599 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1600 1600 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1601 1601 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
1602 1602 -l2hdr->b_asize, 0, 0);
1603 1603 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1604 1604 if (hdr->b_state == arc_l2c_only)
1605 1605 l2arc_hdr_stat_remove();
1606 1606 hdr->b_l2hdr = NULL;
1607 1607 }
1608 1608
1609 1609 if (!buflist_held)
1610 1610 mutex_exit(&l2arc_buflist_mtx);
1611 1611 }
1612 1612
1613 1613 if (!BUF_EMPTY(hdr)) {
1614 1614 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1615 1615 buf_discard_identity(hdr);
1616 1616 }
1617 1617 while (hdr->b_buf) {
1618 1618 arc_buf_t *buf = hdr->b_buf;
1619 1619
1620 1620 if (buf->b_efunc) {
1621 1621 mutex_enter(&arc_eviction_mtx);
1622 1622 mutex_enter(&buf->b_evict_lock);
1623 1623 ASSERT(buf->b_hdr != NULL);
1624 1624 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1625 1625 hdr->b_buf = buf->b_next;
1626 1626 buf->b_hdr = &arc_eviction_hdr;
1627 1627 buf->b_next = arc_eviction_list;
1628 1628 arc_eviction_list = buf;
1629 1629 mutex_exit(&buf->b_evict_lock);
1630 1630 mutex_exit(&arc_eviction_mtx);
1631 1631 } else {
1632 1632 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1633 1633 }
1634 1634 }
1635 1635 if (hdr->b_freeze_cksum != NULL) {
1636 1636 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1637 1637 hdr->b_freeze_cksum = NULL;
1638 1638 }
1639 1639 if (hdr->b_thawed) {
1640 1640 kmem_free(hdr->b_thawed, 1);
1641 1641 hdr->b_thawed = NULL;
1642 1642 }
1643 1643
1644 1644 ASSERT(!list_link_active(&hdr->b_arc_node));
1645 1645 ASSERT3P(hdr->b_hash_next, ==, NULL);
1646 1646 ASSERT3P(hdr->b_acb, ==, NULL);
1647 1647 kmem_cache_free(hdr_cache, hdr);
1648 1648 }
1649 1649
1650 1650 void
1651 1651 arc_buf_free(arc_buf_t *buf, void *tag)
1652 1652 {
1653 1653 arc_buf_hdr_t *hdr = buf->b_hdr;
1654 1654 int hashed = hdr->b_state != arc_anon;
1655 1655
1656 1656 ASSERT(buf->b_efunc == NULL);
1657 1657 ASSERT(buf->b_data != NULL);
1658 1658
1659 1659 if (hashed) {
1660 1660 kmutex_t *hash_lock = HDR_LOCK(hdr);
1661 1661
1662 1662 mutex_enter(hash_lock);
1663 1663 hdr = buf->b_hdr;
1664 1664 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1665 1665
1666 1666 (void) remove_reference(hdr, hash_lock, tag);
1667 1667 if (hdr->b_datacnt > 1) {
1668 1668 arc_buf_destroy(buf, FALSE, TRUE);
1669 1669 } else {
1670 1670 ASSERT(buf == hdr->b_buf);
1671 1671 ASSERT(buf->b_efunc == NULL);
1672 1672 hdr->b_flags |= ARC_BUF_AVAILABLE;
1673 1673 }
1674 1674 mutex_exit(hash_lock);
1675 1675 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1676 1676 int destroy_hdr;
1677 1677 /*
1678 1678 * We are in the middle of an async write. Don't destroy
1679 1679 * this buffer unless the write completes before we finish
1680 1680 * decrementing the reference count.
1681 1681 */
1682 1682 mutex_enter(&arc_eviction_mtx);
1683 1683 (void) remove_reference(hdr, NULL, tag);
1684 1684 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1685 1685 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1686 1686 mutex_exit(&arc_eviction_mtx);
1687 1687 if (destroy_hdr)
1688 1688 arc_hdr_destroy(hdr);
1689 1689 } else {
1690 1690 if (remove_reference(hdr, NULL, tag) > 0)
1691 1691 arc_buf_destroy(buf, FALSE, TRUE);
1692 1692 else
1693 1693 arc_hdr_destroy(hdr);
1694 1694 }
1695 1695 }
1696 1696
1697 1697 boolean_t
1698 1698 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1699 1699 {
1700 1700 arc_buf_hdr_t *hdr = buf->b_hdr;
1701 1701 kmutex_t *hash_lock = HDR_LOCK(hdr);
1702 1702 boolean_t no_callback = (buf->b_efunc == NULL);
1703 1703
1704 1704 if (hdr->b_state == arc_anon) {
1705 1705 ASSERT(hdr->b_datacnt == 1);
1706 1706 arc_buf_free(buf, tag);
1707 1707 return (no_callback);
1708 1708 }
1709 1709
1710 1710 mutex_enter(hash_lock);
1711 1711 hdr = buf->b_hdr;
1712 1712 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1713 1713 ASSERT(hdr->b_state != arc_anon);
1714 1714 ASSERT(buf->b_data != NULL);
1715 1715
1716 1716 (void) remove_reference(hdr, hash_lock, tag);
1717 1717 if (hdr->b_datacnt > 1) {
1718 1718 if (no_callback)
1719 1719 arc_buf_destroy(buf, FALSE, TRUE);
1720 1720 } else if (no_callback) {
1721 1721 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1722 1722 ASSERT(buf->b_efunc == NULL);
1723 1723 hdr->b_flags |= ARC_BUF_AVAILABLE;
1724 1724 }
1725 1725 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1726 1726 refcount_is_zero(&hdr->b_refcnt));
1727 1727 mutex_exit(hash_lock);
1728 1728 return (no_callback);
1729 1729 }
1730 1730
1731 1731 int
1732 1732 arc_buf_size(arc_buf_t *buf)
1733 1733 {
1734 1734 return (buf->b_hdr->b_size);
1735 1735 }
1736 1736
1737 1737 /*
1738 1738 * Called from the DMU to determine if the current buffer should be
1739 1739 * evicted. In order to ensure proper locking, the eviction must be initiated
1740 1740 * from the DMU. Return true if the buffer is associated with user data and
1741 1741 * duplicate buffers still exist.
1742 1742 */
1743 1743 boolean_t
1744 1744 arc_buf_eviction_needed(arc_buf_t *buf)
1745 1745 {
1746 1746 arc_buf_hdr_t *hdr;
1747 1747 boolean_t evict_needed = B_FALSE;
1748 1748
1749 1749 if (zfs_disable_dup_eviction)
1750 1750 return (B_FALSE);
1751 1751
1752 1752 mutex_enter(&buf->b_evict_lock);
1753 1753 hdr = buf->b_hdr;
1754 1754 if (hdr == NULL) {
1755 1755 /*
1756 1756 * We are in arc_do_user_evicts(); let that function
1757 1757 * perform the eviction.
1758 1758 */
1759 1759 ASSERT(buf->b_data == NULL);
1760 1760 mutex_exit(&buf->b_evict_lock);
1761 1761 return (B_FALSE);
1762 1762 } else if (buf->b_data == NULL) {
1763 1763 /*
1764 1764 * We have already been added to the arc eviction list;
1765 1765 * recommend eviction.
1766 1766 */
1767 1767 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1768 1768 mutex_exit(&buf->b_evict_lock);
1769 1769 return (B_TRUE);
1770 1770 }
1771 1771
1772 1772 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1773 1773 evict_needed = B_TRUE;
1774 1774
1775 1775 mutex_exit(&buf->b_evict_lock);
1776 1776 return (evict_needed);
1777 1777 }
1778 1778
1779 1779 /*
1780 1780 * Evict buffers from list until we've removed the specified number of
1781 1781 * bytes. Move the removed buffers to the appropriate evict state.
1782 1782 * If the recycle flag is set, then attempt to "recycle" a buffer:
1783 1783 * - look for a buffer to evict that is `bytes' long.
1784 1784 * - return the data block from this buffer rather than freeing it.
1785 1785 * This flag is used by callers that are trying to make space for a
1786 1786 * new buffer in a full arc cache.
1787 1787 *
1788 1788 * This function makes a "best effort". It skips over any buffers
1789 1789 * it can't get a hash_lock on, and so may not catch all candidates.
1790 1790 * It may also return without evicting as much space as requested.
1791 1791 */
1792 1792 static void *
1793 1793 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1794 1794 arc_buf_contents_t type)
1795 1795 {
1796 1796 arc_state_t *evicted_state;
1797 1797 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1798 1798 arc_buf_hdr_t *ab, *ab_prev = NULL;
1799 1799 list_t *list = &state->arcs_list[type];
1800 1800 kmutex_t *hash_lock;
1801 1801 boolean_t have_lock;
1802 1802 void *stolen = NULL;
1803 1803 arc_buf_hdr_t marker = { 0 };
1804 1804 int count = 0;
1805 1805
1806 1806 ASSERT(state == arc_mru || state == arc_mfu);
1807 1807
1808 1808 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1809 1809
1810 1810 mutex_enter(&state->arcs_mtx);
1811 1811 mutex_enter(&evicted_state->arcs_mtx);
1812 1812
1813 1813 for (ab = list_tail(list); ab; ab = ab_prev) {
1814 1814 ab_prev = list_prev(list, ab);
1815 1815 /* prefetch buffers have a minimum lifespan */
1816 1816 if (HDR_IO_IN_PROGRESS(ab) ||
1817 1817 (spa && ab->b_spa != spa) ||
1818 1818 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1819 1819 ddi_get_lbolt() - ab->b_arc_access <
1820 1820 arc_min_prefetch_lifespan)) {
1821 1821 skipped++;
1822 1822 continue;
1823 1823 }
1824 1824 /* "lookahead" for better eviction candidate */
1825 1825 if (recycle && ab->b_size != bytes &&
1826 1826 ab_prev && ab_prev->b_size == bytes)
1827 1827 continue;
1828 1828
1829 1829 /* ignore markers */
1830 1830 if (ab->b_spa == 0)
1831 1831 continue;
1832 1832
1833 1833 /*
1834 1834 * It may take a long time to evict all the bufs requested.
1835 1835 * To avoid blocking all arc activity, periodically drop
1836 1836 * the arcs_mtx and give other threads a chance to run
1837 1837 * before reacquiring the lock.
1838 1838 *
1839 1839 * If we are looking for a buffer to recycle, we are in
1840 1840 * the hot code path, so don't sleep.
1841 1841 */
1842 1842 if (!recycle && count++ > arc_evict_iterations) {
1843 1843 list_insert_after(list, ab, &marker);
1844 1844 mutex_exit(&evicted_state->arcs_mtx);
1845 1845 mutex_exit(&state->arcs_mtx);
1846 1846 kpreempt(KPREEMPT_SYNC);
1847 1847 mutex_enter(&state->arcs_mtx);
1848 1848 mutex_enter(&evicted_state->arcs_mtx);
1849 1849 ab_prev = list_prev(list, &marker);
1850 1850 list_remove(list, &marker);
1851 1851 count = 0;
1852 1852 continue;
1853 1853 }
1854 1854
1855 1855 hash_lock = HDR_LOCK(ab);
1856 1856 have_lock = MUTEX_HELD(hash_lock);
1857 1857 if (have_lock || mutex_tryenter(hash_lock)) {
1858 1858 ASSERT0(refcount_count(&ab->b_refcnt));
1859 1859 ASSERT(ab->b_datacnt > 0);
1860 1860 while (ab->b_buf) {
1861 1861 arc_buf_t *buf = ab->b_buf;
1862 1862 if (!mutex_tryenter(&buf->b_evict_lock)) {
1863 1863 missed += 1;
1864 1864 break;
1865 1865 }
1866 1866 if (buf->b_data) {
1867 1867 bytes_evicted += ab->b_size;
1868 1868 if (recycle && ab->b_type == type &&
1869 1869 ab->b_size == bytes &&
1870 1870 !HDR_L2_WRITING(ab)) {
1871 1871 stolen = buf->b_data;
1872 1872 recycle = FALSE;
1873 1873 }
1874 1874 }
1875 1875 if (buf->b_efunc) {
1876 1876 mutex_enter(&arc_eviction_mtx);
1877 1877 arc_buf_destroy(buf,
1878 1878 buf->b_data == stolen, FALSE);
1879 1879 ab->b_buf = buf->b_next;
1880 1880 buf->b_hdr = &arc_eviction_hdr;
1881 1881 buf->b_next = arc_eviction_list;
1882 1882 arc_eviction_list = buf;
1883 1883 mutex_exit(&arc_eviction_mtx);
1884 1884 mutex_exit(&buf->b_evict_lock);
1885 1885 } else {
1886 1886 mutex_exit(&buf->b_evict_lock);
1887 1887 arc_buf_destroy(buf,
1888 1888 buf->b_data == stolen, TRUE);
1889 1889 }
1890 1890 }
1891 1891
1892 1892 if (ab->b_l2hdr) {
1893 1893 ARCSTAT_INCR(arcstat_evict_l2_cached,
1894 1894 ab->b_size);
1895 1895 } else {
1896 1896 if (l2arc_write_eligible(ab->b_spa, ab)) {
1897 1897 ARCSTAT_INCR(arcstat_evict_l2_eligible,
1898 1898 ab->b_size);
1899 1899 } else {
1900 1900 ARCSTAT_INCR(
1901 1901 arcstat_evict_l2_ineligible,
1902 1902 ab->b_size);
1903 1903 }
1904 1904 }
1905 1905
1906 1906 if (ab->b_datacnt == 0) {
1907 1907 arc_change_state(evicted_state, ab, hash_lock);
1908 1908 ASSERT(HDR_IN_HASH_TABLE(ab));
1909 1909 ab->b_flags |= ARC_IN_HASH_TABLE;
1910 1910 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1911 1911 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1912 1912 }
1913 1913 if (!have_lock)
1914 1914 mutex_exit(hash_lock);
1915 1915 if (bytes >= 0 && bytes_evicted >= bytes)
1916 1916 break;
1917 1917 } else {
1918 1918 missed += 1;
1919 1919 }
1920 1920 }
1921 1921
1922 1922 mutex_exit(&evicted_state->arcs_mtx);
1923 1923 mutex_exit(&state->arcs_mtx);
1924 1924
1925 1925 if (bytes_evicted < bytes)
1926 1926 dprintf("only evicted %lld bytes from %x",
1927 1927 (longlong_t)bytes_evicted, state);
1928 1928
1929 1929 if (skipped)
1930 1930 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1931 1931
1932 1932 if (missed)
1933 1933 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1934 1934
1935 1935 /*
1936 1936 * Note: we have just evicted some data into the ghost state,
1937 1937 * potentially putting the ghost size over the desired size. Rather
1938 1938  * than evicting from the ghost list in this hot code path, leave
1939 1939 * this chore to the arc_reclaim_thread().
1940 1940 */
1941 1941
1942 1942 return (stolen);
1943 1943 }
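
The recycle path above is easier to picture outside the kernel. Below is a minimal, self-contained C sketch (the ebuf_t type and recycle_from_tail() name are hypothetical, not part of arc.c) of the same idea: walk a list from its LRU end, skip buffers that still hold references, and hand back the data block of an exact-size match instead of freeing it.

	#include <stddef.h>

	typedef struct ebuf {
		struct ebuf	*prev;		/* toward the head (MRU end) */
		size_t		size;		/* payload size in bytes */
		int		refcnt;		/* non-zero means un-evictable */
		void		*data;		/* payload block to steal or free */
	} ebuf_t;

	/*
	 * Scan from the tail (LRU end) for an evictable buffer whose payload
	 * is exactly 'bytes' long and return its data block so the caller can
	 * reuse it.  Returns NULL when nothing suitable is found.
	 */
	void *
	recycle_from_tail(ebuf_t *tail, size_t bytes)
	{
		for (ebuf_t *b = tail; b != NULL; b = b->prev) {
			if (b->refcnt != 0)
				continue;	/* un-evictable, skip it */
			if (b->size != bytes)
				continue;	/* wrong size for recycling */
			void *stolen = b->data;
			b->data = NULL;		/* caller now owns the block */
			return (stolen);
		}
		return (NULL);
	}
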
1944 1944
1945 1945 /*
1946 1946 * Remove buffers from list until we've removed the specified number of
1947 1947 * bytes. Destroy the buffers that are removed.
1948 1948 */
1949 1949 static void
1950 1950 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1951 1951 {
1952 1952 arc_buf_hdr_t *ab, *ab_prev;
1953 1953 arc_buf_hdr_t marker = { 0 };
1954 1954 list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1955 1955 kmutex_t *hash_lock;
1956 1956 uint64_t bytes_deleted = 0;
1957 1957 uint64_t bufs_skipped = 0;
1958 1958 int count = 0;
1959 1959
1960 1960 ASSERT(GHOST_STATE(state));
1961 1961 top:
1962 1962 mutex_enter(&state->arcs_mtx);
1963 1963 for (ab = list_tail(list); ab; ab = ab_prev) {
1964 1964 ab_prev = list_prev(list, ab);
1965 1965 if (ab->b_type > ARC_BUFC_NUMTYPES)
1966 1966 panic("invalid ab=%p", (void *)ab);
1967 1967 if (spa && ab->b_spa != spa)
1968 1968 continue;
1969 1969
1970 1970 /* ignore markers */
1971 1971 if (ab->b_spa == 0)
1972 1972 continue;
1973 1973
1974 1974 hash_lock = HDR_LOCK(ab);
1975 1975 /* caller may be trying to modify this buffer, skip it */
1976 1976 if (MUTEX_HELD(hash_lock))
1977 1977 continue;
1978 1978
1979 1979 /*
1980 1980 * It may take a long time to evict all the bufs requested.
1981 1981 * To avoid blocking all arc activity, periodically drop
1982 1982 * the arcs_mtx and give other threads a chance to run
1983 1983 * before reacquiring the lock.
1984 1984 */
1985 1985 if (count++ > arc_evict_iterations) {
1986 1986 list_insert_after(list, ab, &marker);
1987 1987 mutex_exit(&state->arcs_mtx);
1988 1988 kpreempt(KPREEMPT_SYNC);
1989 1989 mutex_enter(&state->arcs_mtx);
1990 1990 ab_prev = list_prev(list, &marker);
1991 1991 list_remove(list, &marker);
1992 1992 count = 0;
1993 1993 continue;
1994 1994 }
1995 1995 if (mutex_tryenter(hash_lock)) {
1996 1996 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1997 1997 ASSERT(ab->b_buf == NULL);
1998 1998 ARCSTAT_BUMP(arcstat_deleted);
1999 1999 bytes_deleted += ab->b_size;
2000 2000
2001 2001 if (ab->b_l2hdr != NULL) {
2002 2002 /*
2003 2003 * This buffer is cached on the 2nd Level ARC;
2004 2004 * don't destroy the header.
2005 2005 */
2006 2006 arc_change_state(arc_l2c_only, ab, hash_lock);
2007 2007 mutex_exit(hash_lock);
2008 2008 } else {
2009 2009 arc_change_state(arc_anon, ab, hash_lock);
2010 2010 mutex_exit(hash_lock);
2011 2011 arc_hdr_destroy(ab);
2012 2012 }
2013 2013
2014 2014 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2015 2015 if (bytes >= 0 && bytes_deleted >= bytes)
2016 2016 break;
2017 2017 } else if (bytes < 0) {
2018 2018 /*
2019 2019 * Insert a list marker and then wait for the
2020 2020  * hash lock to become available. Once it's
2021 2021 * available, restart from where we left off.
2022 2022 */
2023 2023 list_insert_after(list, ab, &marker);
2024 2024 mutex_exit(&state->arcs_mtx);
2025 2025 mutex_enter(hash_lock);
2026 2026 mutex_exit(hash_lock);
2027 2027 mutex_enter(&state->arcs_mtx);
2028 2028 ab_prev = list_prev(list, &marker);
2029 2029 list_remove(list, &marker);
2030 2030 } else {
2031 2031 bufs_skipped += 1;
2032 2032 }
2033 2033
2034 2034 }
2035 2035 mutex_exit(&state->arcs_mtx);
2036 2036
2037 2037 if (list == &state->arcs_list[ARC_BUFC_DATA] &&
2038 2038 (bytes < 0 || bytes_deleted < bytes)) {
2039 2039 list = &state->arcs_list[ARC_BUFC_METADATA];
2040 2040 goto top;
2041 2041 }
2042 2042
2043 2043 if (bufs_skipped) {
2044 2044 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2045 2045 ASSERT(bytes >= 0);
2046 2046 }
2047 2047
2048 2048 if (bytes_deleted < bytes)
2049 2049 dprintf("only deleted %lld bytes from %p",
2050 2050 (longlong_t)bytes_deleted, state);
2051 2051 }
2052 2052
2053 2053 static void
2054 2054 arc_adjust(void)
2055 2055 {
2056 2056 int64_t adjustment, delta;
2057 2057
2058 2058 /*
2059 2059 * Adjust MRU size
2060 2060 */
2061 2061
2062 2062 adjustment = MIN((int64_t)(arc_size - arc_c),
2063 2063 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2064 2064 arc_p));
2065 2065
2066 2066 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2067 2067 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2068 2068 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
2069 2069 adjustment -= delta;
2070 2070 }
2071 2071
2072 2072 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2073 2073 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2074 2074 (void) arc_evict(arc_mru, NULL, delta, FALSE,
2075 2075 ARC_BUFC_METADATA);
2076 2076 }
2077 2077
2078 2078 /*
2079 2079 * Adjust MFU size
2080 2080 */
2081 2081
2082 2082 adjustment = arc_size - arc_c;
2083 2083
2084 2084 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2085 2085 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2086 2086 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
2087 2087 adjustment -= delta;
2088 2088 }
2089 2089
2090 2090 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2091 2091 int64_t delta = MIN(adjustment,
2092 2092 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2093 2093 (void) arc_evict(arc_mfu, NULL, delta, FALSE,
2094 2094 ARC_BUFC_METADATA);
2095 2095 }
2096 2096
2097 2097 /*
2098 2098 * Adjust ghost lists
2099 2099 */
2100 2100
2101 2101 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2102 2102
2103 2103 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2104 2104 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2105 2105 arc_evict_ghost(arc_mru_ghost, NULL, delta);
2106 2106 }
2107 2107
2108 2108 adjustment =
2109 2109 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2110 2110
2111 2111 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2112 2112 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2113 2113 arc_evict_ghost(arc_mfu_ghost, NULL, delta);
2114 2114 }
2115 2115 }
2116 2116
2117 2117 static void
2118 2118 arc_do_user_evicts(void)
2119 2119 {
2120 2120 mutex_enter(&arc_eviction_mtx);
2121 2121 while (arc_eviction_list != NULL) {
2122 2122 arc_buf_t *buf = arc_eviction_list;
2123 2123 arc_eviction_list = buf->b_next;
2124 2124 mutex_enter(&buf->b_evict_lock);
2125 2125 buf->b_hdr = NULL;
2126 2126 mutex_exit(&buf->b_evict_lock);
2127 2127 mutex_exit(&arc_eviction_mtx);
2128 2128
2129 2129 if (buf->b_efunc != NULL)
2130 2130 VERIFY0(buf->b_efunc(buf->b_private));
2131 2131
2132 2132 buf->b_efunc = NULL;
2133 2133 buf->b_private = NULL;
2134 2134 kmem_cache_free(buf_cache, buf);
2135 2135 mutex_enter(&arc_eviction_mtx);
2136 2136 }
2137 2137 mutex_exit(&arc_eviction_mtx);
2138 2138 }
2139 2139
2140 2140 /*
2141 2141 * Flush all *evictable* data from the cache for the given spa.
2142 2142 * NOTE: this will not touch "active" (i.e. referenced) data.
2143 2143 */
2144 2144 void
2145 2145 arc_flush(spa_t *spa)
2146 2146 {
2147 2147 uint64_t guid = 0;
2148 2148
2149 2149 if (spa)
2150 2150 guid = spa_load_guid(spa);
2151 2151
2152 2152 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
2153 2153 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2154 2154 if (spa)
2155 2155 break;
2156 2156 }
2157 2157 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
2158 2158 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2159 2159 if (spa)
2160 2160 break;
2161 2161 }
2162 2162 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
2163 2163 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2164 2164 if (spa)
2165 2165 break;
2166 2166 }
2167 2167 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
2168 2168 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2169 2169 if (spa)
2170 2170 break;
2171 2171 }
2172 2172
2173 2173 arc_evict_ghost(arc_mru_ghost, guid, -1);
2174 2174 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2175 2175
2176 2176 mutex_enter(&arc_reclaim_thr_lock);
2177 2177 arc_do_user_evicts();
2178 2178 mutex_exit(&arc_reclaim_thr_lock);
2179 2179 ASSERT(spa || arc_eviction_list == NULL);
2180 2180 }
2181 2181
2182 2182 void
2183 2183 arc_shrink(void)
2184 2184 {
2185 2185 if (arc_c > arc_c_min) {
2186 2186 uint64_t to_free;
2187 2187
2188 2188 #ifdef _KERNEL
2189 2189 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
2190 2190 #else
2191 2191 to_free = arc_c >> arc_shrink_shift;
2192 2192 #endif
2193 2193 if (arc_c > arc_c_min + to_free)
2194 2194 atomic_add_64(&arc_c, -to_free);
2195 2195 else
2196 2196 arc_c = arc_c_min;
2197 2197
2198 2198 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2199 2199 if (arc_c > arc_size)
2200 2200 arc_c = MAX(arc_size, arc_c_min);
2201 2201 if (arc_p > arc_c)
2202 2202 arc_p = (arc_c >> 1);
2203 2203 ASSERT(arc_c >= arc_c_min);
2204 2204 ASSERT((int64_t)arc_p >= 0);
2205 2205 }
2206 2206
2207 2207 if (arc_size > arc_c)
2208 2208 arc_adjust();
2209 2209 }
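
The shrink arithmetic above can be restated in isolation. A hedged sketch (plain C; shrink_target() and its parameters are stand-ins for arc_c, arc_c_min, arc_shrink_shift and ptob(needfree), not real kernel symbols): it frees at least 1/(2^arc_shrink_shift) of the cache, or as much as the pageout subsystem says is needed, but never drops the target below arc_c_min.

	#include <stdint.h>

	/*
	 * Hypothetical helper: compute the new cache target after a shrink.
	 * 'c' and 'c_min' stand in for arc_c and arc_c_min, 'shift' for
	 * arc_shrink_shift, and 'needed' for ptob(needfree).
	 */
	uint64_t
	shrink_target(uint64_t c, uint64_t c_min, int shift, uint64_t needed)
	{
		/* free at least 1/(2^shift) of the cache, or what's needed */
		uint64_t to_free = c >> shift;
		if (needed > to_free)
			to_free = needed;

		/* never shrink below the configured minimum */
		if (c > c_min + to_free)
			return (c - to_free);
		return (c_min);
	}
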
2210 2210
2211 2211 /*
2212 2212 * Determine if the system is under memory pressure and is asking
2213 2213 * to reclaim memory. A return value of 1 indicates that the system
2214 2214 * is under memory pressure and that the arc should adjust accordingly.
2215 2215 */
2216 2216 static int
2217 2217 arc_reclaim_needed(void)
2218 2218 {
2219 2219 uint64_t extra;
2220 2220
2221 2221 #ifdef _KERNEL
2222 2222
2223 2223 if (needfree)
2224 2224 return (1);
2225 2225
2226 2226 /*
2227 2227 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2228 2228 */
2229 2229 extra = desfree;
2230 2230
2231 2231 /*
2232 2232 * check that we're out of range of the pageout scanner. It starts to
2233 2233 * schedule paging if freemem is less than lotsfree and needfree.
2234 2234 * lotsfree is the high-water mark for pageout, and needfree is the
2235 2235 * number of needed free pages. We add extra pages here to make sure
2236 2236 * the scanner doesn't start up while we're freeing memory.
2237 2237 */
2238 2238 if (freemem < lotsfree + needfree + extra)
2239 2239 return (1);
2240 2240
2241 2241 /*
2242 2242 * check to make sure that swapfs has enough space so that anon
2243 2243 * reservations can still succeed. anon_resvmem() checks that the
2244 2244 * availrmem is greater than swapfs_minfree, and the number of reserved
2245 2245 * swap pages. We also add a bit of extra here just to prevent
2246 2246 * circumstances from getting really dire.
2247 2247 */
2248 2248 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2249 2249 return (1);
2250 2250
2251 2251 /*
2252 2252 * Check that we have enough availrmem that memory locking (e.g., via
2253 2253 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
2254 2254 * stores the number of pages that cannot be locked; when availrmem
2255 2255 * drops below pages_pp_maximum, page locking mechanisms such as
2256 2256 * page_pp_lock() will fail.)
2257 2257 */
2258 2258 if (availrmem <= pages_pp_maximum)
2259 2259 return (1);
2260 2260
2261 2261 #if defined(__i386)
2262 2262 /*
2263 2263 * If we're on an i386 platform, it's possible that we'll exhaust the
2264 2264 * kernel heap space before we ever run out of available physical
2265 2265 * memory. Most checks of the size of the heap_area compare against
2266 2266 * tune.t_minarmem, which is the minimum available real memory that we
2267 2267 * can have in the system. However, this is generally fixed at 25 pages
2268 2268 * which is so low that it's useless. In this comparison, we seek to
2269 2269 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2270 2270 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2271 2271 * free)
2272 2272 */
2273 2273 if (vmem_size(heap_arena, VMEM_FREE) <
2274 2274 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2275 2275 return (1);
2276 2276 #endif
2277 2277
2278 2278 /*
2279 2279 * If zio data pages are being allocated out of a separate heap segment,
2280 2280 * then enforce that the size of available vmem for this arena remains
2281 2281 * above about 1/16th free.
2282 2282 *
2283 2283 * Note: The 1/16th arena free requirement was put in place
2284 2284 * to aggressively evict memory from the arc in order to avoid
2285 2285 * memory fragmentation issues.
2286 2286 */
2287 2287 if (zio_arena != NULL &&
2288 2288 vmem_size(zio_arena, VMEM_FREE) <
2289 2289 (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2290 2290 return (1);
2291 2291 #else
2292 2292 if (spa_get_random(100) == 0)
2293 2293 return (1);
2294 2294 #endif
2295 2295 return (0);
2296 2296 }
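
For illustration only, two of the checks above can be condensed into a standalone sketch. This is a hypothetical helper, not the kernel routine: plain integers stand in for freemem, lotsfree, needfree, desfree and for vmem_size() on the zio arena.

	#include <stdint.h>

	int
	reclaim_needed_sketch(uint64_t freemem, uint64_t lotsfree,
	    uint64_t needfree, uint64_t desfree, uint64_t arena_free,
	    uint64_t arena_alloc)
	{
		/* stay clear of the pageout scanner's trigger point */
		if (freemem < lotsfree + needfree + desfree)
			return (1);

		/* keep about 1/16th of the zio arena free, limiting fragmentation */
		if (arena_alloc != 0 && arena_free < (arena_alloc >> 4))
			return (1);

		return (0);
	}
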
2297 2297
2298 2298 static void
2299 2299 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2300 2300 {
2301 2301 size_t i;
2302 2302 kmem_cache_t *prev_cache = NULL;
2303 2303 kmem_cache_t *prev_data_cache = NULL;
2304 2304 extern kmem_cache_t *zio_buf_cache[];
2305 2305 extern kmem_cache_t *zio_data_buf_cache[];
2306 2306 extern kmem_cache_t *range_seg_cache;
2307 2307
2308 2308 #ifdef _KERNEL
2309 2309 if (arc_meta_used >= arc_meta_limit) {
2310 2310 /*
2311 2311 * We are exceeding our meta-data cache limit.
2312 2312 * Purge some DNLC entries to release holds on meta-data.
2313 2313 */
2314 2314 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2315 2315 }
2316 2316 #if defined(__i386)
2317 2317 /*
2318 2318 * Reclaim unused memory from all kmem caches.
2319 2319 */
2320 2320 kmem_reap();
2321 2321 #endif
2322 2322 #endif
2323 2323
2324 2324 /*
2325 2325 * An aggressive reclamation will shrink the cache size as well as
2326 2326 * reap free buffers from the arc kmem caches.
2327 2327 */
2328 2328 if (strat == ARC_RECLAIM_AGGR)
2329 2329 arc_shrink();
2330 2330
2331 2331 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2332 2332 if (zio_buf_cache[i] != prev_cache) {
2333 2333 prev_cache = zio_buf_cache[i];
2334 2334 kmem_cache_reap_now(zio_buf_cache[i]);
2335 2335 }
2336 2336 if (zio_data_buf_cache[i] != prev_data_cache) {
2337 2337 prev_data_cache = zio_data_buf_cache[i];
2338 2338 kmem_cache_reap_now(zio_data_buf_cache[i]);
2339 2339 }
2340 2340 }
2341 2341 kmem_cache_reap_now(buf_cache);
2342 2342 kmem_cache_reap_now(hdr_cache);
2343 2343 kmem_cache_reap_now(range_seg_cache);
2344 2344
2345 2345 /*
2346 2346  * Ask the vmem arena to reclaim unused memory from its
2347 2347 * quantum caches.
2348 2348 */
2349 2349 if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2350 2350 vmem_qcache_reap(zio_arena);
2351 2351 }
2352 2352
2353 2353 static void
2354 2354 arc_reclaim_thread(void)
2355 2355 {
2356 2356 clock_t growtime = 0;
2357 2357 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2358 2358 callb_cpr_t cpr;
2359 2359
2360 2360 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2361 2361
2362 2362 mutex_enter(&arc_reclaim_thr_lock);
2363 2363 while (arc_thread_exit == 0) {
2364 2364 if (arc_reclaim_needed()) {
2365 2365
2366 2366 if (arc_no_grow) {
2367 2367 if (last_reclaim == ARC_RECLAIM_CONS) {
2368 2368 last_reclaim = ARC_RECLAIM_AGGR;
2369 2369 } else {
2370 2370 last_reclaim = ARC_RECLAIM_CONS;
2371 2371 }
2372 2372 } else {
2373 2373 arc_no_grow = TRUE;
2374 2374 last_reclaim = ARC_RECLAIM_AGGR;
2375 2375 membar_producer();
2376 2376 }
2377 2377
2378 2378 /* reset the growth delay for every reclaim */
2379 2379 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2380 2380
2381 2381 arc_kmem_reap_now(last_reclaim);
2382 2382 arc_warm = B_TRUE;
2383 2383
2384 2384 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2385 2385 arc_no_grow = FALSE;
2386 2386 }
2387 2387
2388 2388 arc_adjust();
2389 2389
2390 2390 if (arc_eviction_list != NULL)
2391 2391 arc_do_user_evicts();
2392 2392
2393 2393 /* block until needed, or one second, whichever is shorter */
2394 2394 CALLB_CPR_SAFE_BEGIN(&cpr);
2395 2395 (void) cv_timedwait(&arc_reclaim_thr_cv,
2396 2396 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2397 2397 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2398 2398 }
2399 2399
2400 2400 arc_thread_exit = 0;
2401 2401 cv_broadcast(&arc_reclaim_thr_cv);
2402 2402 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2403 2403 thread_exit();
2404 2404 }
2405 2405
2406 2406 /*
2407 2407 * Adapt arc info given the number of bytes we are trying to add and
2408 2408  * the state that we are coming from. This function is only called
2409 2409 * when we are adding new content to the cache.
2410 2410 */
2411 2411 static void
2412 2412 arc_adapt(int bytes, arc_state_t *state)
2413 2413 {
2414 2414 int mult;
2415 2415 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2416 2416
2417 2417 if (state == arc_l2c_only)
2418 2418 return;
2419 2419
2420 2420 ASSERT(bytes > 0);
2421 2421 /*
2422 2422 * Adapt the target size of the MRU list:
2423 2423 * - if we just hit in the MRU ghost list, then increase
2424 2424 * the target size of the MRU list.
2425 2425 * - if we just hit in the MFU ghost list, then increase
2426 2426 * the target size of the MFU list by decreasing the
2427 2427 * target size of the MRU list.
2428 2428 */
2429 2429 if (state == arc_mru_ghost) {
2430 2430 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2431 2431 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2432 2432 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2433 2433
2434 2434 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2435 2435 } else if (state == arc_mfu_ghost) {
2436 2436 uint64_t delta;
2437 2437
2438 2438 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2439 2439 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2440 2440 mult = MIN(mult, 10);
2441 2441
2442 2442 delta = MIN(bytes * mult, arc_p);
2443 2443 arc_p = MAX(arc_p_min, arc_p - delta);
2444 2444 }
2445 2445 ASSERT((int64_t)arc_p >= 0);
2446 2446
2447 2447 if (arc_reclaim_needed()) {
2448 2448 cv_signal(&arc_reclaim_thr_cv);
2449 2449 return;
2450 2450 }
2451 2451
2452 2452 if (arc_no_grow)
2453 2453 return;
2454 2454
2455 2455 if (arc_c >= arc_c_max)
2456 2456 return;
2457 2457
2458 2458 /*
2459 2459 * If we're within (2 * maxblocksize) bytes of the target
2460 2460 * cache size, increment the target cache size
2461 2461 */
2462 2462 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2463 2463 atomic_add_64(&arc_c, (int64_t)bytes);
2464 2464 if (arc_c > arc_c_max)
2465 2465 arc_c = arc_c_max;
2466 2466 else if (state == arc_anon)
2467 2467 atomic_add_64(&arc_p, (int64_t)bytes);
2468 2468 if (arc_p > arc_c)
2469 2469 arc_p = arc_c;
2470 2470 }
2471 2471 ASSERT((int64_t)arc_p >= 0);
2472 2472 }
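
The ghost-hit adaptation above follows a simple rule: grow or shrink the MRU target p in proportion to how lopsided the two ghost lists are, capped at a factor of ten and clamped between arc_p_min and arc_c - arc_p_min. A hedged standalone sketch of that rule (adapt_p() and its arguments are hypothetical stand-ins for the arc globals):

	#include <stdint.h>

	#define	MIN(a, b)	((a) < (b) ? (a) : (b))
	#define	MAX(a, b)	((a) > (b) ? (a) : (b))

	/*
	 * Hypothetical helper: recompute the MRU target 'p' after a hit in
	 * one of the ghost lists.  The list that was hit is necessarily
	 * non-empty, so the divisions below cannot divide by zero.
	 */
	uint64_t
	adapt_p(uint64_t p, uint64_t c, uint64_t p_min, uint64_t bytes,
	    uint64_t mru_ghost_size, uint64_t mfu_ghost_size, int hit_mru_ghost)
	{
		uint64_t mult;

		if (hit_mru_ghost) {
			/* grow the MRU target, weighted by the list imbalance */
			mult = (mru_ghost_size >= mfu_ghost_size) ? 1 :
			    mfu_ghost_size / mru_ghost_size;
			mult = MIN(mult, 10);	/* avoid wild swings */
			return (MIN(c - p_min, p + bytes * mult));
		}

		/* MFU ghost hit: shrink the MRU target instead */
		mult = (mfu_ghost_size >= mru_ghost_size) ? 1 :
		    mru_ghost_size / mfu_ghost_size;
		mult = MIN(mult, 10);
		return (MAX(p_min, p - MIN(bytes * mult, p)));
	}
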
2473 2473
2474 2474 /*
2475 2475 * Check if the cache has reached its limits and eviction is required
2476 2476 * prior to insert.
2477 2477 */
2478 2478 static int
2479 2479 arc_evict_needed(arc_buf_contents_t type)
2480 2480 {
2481 2481 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2482 2482 return (1);
2483 2483
2484 2484 if (arc_reclaim_needed())
2485 2485 return (1);
2486 2486
2487 2487 return (arc_size > arc_c);
2488 2488 }
2489 2489
2490 2490 /*
2491 2491 * The buffer, supplied as the first argument, needs a data block.
2492 2492 * So, if we are at cache max, determine which cache should be victimized.
2493 2493 * We have the following cases:
2494 2494 *
2495 2495 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2496 2496 * In this situation if we're out of space, but the resident size of the MFU is
2497 2497 * under the limit, victimize the MFU cache to satisfy this insertion request.
2498 2498 *
2499 2499 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2500 2500 * Here, we've used up all of the available space for the MRU, so we need to
2501 2501 * evict from our own cache instead. Evict from the set of resident MRU
2502 2502 * entries.
2503 2503 *
2504 2504 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2505 2505 * c minus p represents the MFU space in the cache, since p is the size of the
2506 2506 * cache that is dedicated to the MRU. In this situation there's still space on
2507 2507 * the MFU side, so the MRU side needs to be victimized.
2508 2508 *
2509 2509 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2510 2510 * MFU's resident set is consuming more space than it has been allotted. In
2511 2511 * this situation, we must victimize our own cache, the MFU, for this insertion.
2512 2512 */
2513 2513 static void
2514 2514 arc_get_data_buf(arc_buf_t *buf)
2515 2515 {
2516 2516 arc_state_t *state = buf->b_hdr->b_state;
2517 2517 uint64_t size = buf->b_hdr->b_size;
2518 2518 arc_buf_contents_t type = buf->b_hdr->b_type;
2519 2519
2520 2520 arc_adapt(size, state);
2521 2521
2522 +top:
2522 2523 /*
2523 2524 * We have not yet reached cache maximum size,
2524 2525 * just allocate a new buffer.
2525 2526 */
2526 2527 if (!arc_evict_needed(type)) {
2527 2528 if (type == ARC_BUFC_METADATA) {
2528 - buf->b_data = zio_buf_alloc(size);
2529 - arc_space_consume(size, ARC_SPACE_DATA);
2529 + buf->b_data = zio_buf_alloc_canfail(size);
2530 + if (buf->b_data != NULL) {
2531 + arc_space_consume(size, ARC_SPACE_DATA);
2532 + goto out;
2533 + }
2530 2534 } else {
2531 2535 ASSERT(type == ARC_BUFC_DATA);
2532 - buf->b_data = zio_data_buf_alloc(size);
2533 - ARCSTAT_INCR(arcstat_data_size, size);
2534 - atomic_add_64(&arc_size, size);
2536 + buf->b_data = zio_data_buf_alloc_canfail(size);
2537 + if (buf->b_data != NULL) {
2538 + ARCSTAT_INCR(arcstat_data_size, size);
2539 + atomic_add_64(&arc_size, size);
2540 + goto out;
2541 + }
2535 2542 }
2536 - goto out;
2543 + /*
2544 +			 * Memory allocation failed, probably due to excessive
2545 +			 * fragmentation; we need to evict regardless.
2546 + */
2537 2547 }
2538 2548
2539 2549 /*
2540 2550 * If we are prefetching from the mfu ghost list, this buffer
2541 2551 * will end up on the mru list; so steal space from there.
2542 2552 */
2543 2553 if (state == arc_mfu_ghost)
2544 2554 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2545 2555 else if (state == arc_mru_ghost)
2546 2556 state = arc_mru;
2547 2557
2548 2558 if (state == arc_mru || state == arc_anon) {
2549 2559 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2550 2560 state = (arc_mfu->arcs_lsize[type] >= size &&
2551 2561 arc_p > mru_used) ? arc_mfu : arc_mru;
2552 2562 } else {
2553 2563 /* MFU cases */
2554 2564 uint64_t mfu_space = arc_c - arc_p;
2555 2565 state = (arc_mru->arcs_lsize[type] >= size &&
2556 2566 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2557 2567 }
2558 2568 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2559 - if (type == ARC_BUFC_METADATA) {
2560 - buf->b_data = zio_buf_alloc(size);
2561 - arc_space_consume(size, ARC_SPACE_DATA);
2562 - } else {
2563 - ASSERT(type == ARC_BUFC_DATA);
2564 - buf->b_data = zio_data_buf_alloc(size);
2565 - ARCSTAT_INCR(arcstat_data_size, size);
2566 - atomic_add_64(&arc_size, size);
2567 - }
2568 2569 ARCSTAT_BUMP(arcstat_recycle_miss);
2570 + goto top;
2569 2571 }
2570 2572 ASSERT(buf->b_data != NULL);
2571 2573 out:
2572 2574 /*
2573 2575 * Update the state size. Note that ghost states have a
2574 2576 * "ghost size" and so don't need to be updated.
2575 2577 */
2576 2578 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2577 2579 arc_buf_hdr_t *hdr = buf->b_hdr;
2578 2580
2579 2581 atomic_add_64(&hdr->b_state->arcs_size, size);
2580 2582 if (list_link_active(&hdr->b_arc_node)) {
2581 2583 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2582 2584 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2583 2585 }
2584 2586 /*
2585 2587 * If we are growing the cache, and we are adding anonymous
2586 2588 * data, and we have outgrown arc_p, update arc_p
2587 2589 */
2588 2590 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2589 2591 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2590 2592 arc_p = MIN(arc_c, arc_p + size);
2591 2593 }
2592 2594 }
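
The essence of the new control flow in arc_get_data_buf() is: while the cache is under its limits, prefer a fallible allocation; if that allocation fails (typically because of fragmentation), or the cache is over its limits, try to recycle an evicted block of the same size; on a recycle miss, go back and re-evaluate. A minimal sketch of that loop, with hypothetical callbacks standing in for arc_evict_needed(), zio_buf_alloc_canfail() and the recycling arc_evict() call:

	#include <stddef.h>

	/*
	 * 'evict_needed' mirrors arc_evict_needed(), 'alloc_canfail' mirrors
	 * zio_buf_alloc_canfail() (it may return NULL instead of blocking),
	 * and 'evict' mirrors the recycling arc_evict() call (NULL means
	 * nothing of this size could be recycled).
	 */
	void *
	get_data_block(size_t size, int (*evict_needed)(void),
	    void *(*alloc_canfail)(size_t), void *(*evict)(size_t))
	{
		void *data;

		for (;;) {
			if (!evict_needed()) {
				/* under the limits: prefer a fresh allocation */
				if ((data = alloc_canfail(size)) != NULL)
					return (data);
				/*
				 * The allocation failed, most likely because of
				 * fragmentation; fall through and evict anyway.
				 */
			}
			/* try to recycle an evictable block of exactly this size */
			if ((data = evict(size)) != NULL)
				return (data);
			/* recycle miss: re-evaluate the limits and try again */
		}
	}

Unlike the code being replaced, there is no unconditional blocking allocation after a recycle miss; the loop keeps alternating between fallible allocation and eviction until one of them succeeds, which is what makes eviction more aggressive when memory is unavailable.
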
2593 2595
2594 2596 /*
2595 2597 * This routine is called whenever a buffer is accessed.
2596 2598 * NOTE: the hash lock is dropped in this function.
2597 2599 */
2598 2600 static void
2599 2601 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2600 2602 {
2601 2603 clock_t now;
2602 2604
2603 2605 ASSERT(MUTEX_HELD(hash_lock));
2604 2606
2605 2607 if (buf->b_state == arc_anon) {
2606 2608 /*
2607 2609 * This buffer is not in the cache, and does not
2608 2610 * appear in our "ghost" list. Add the new buffer
2609 2611 * to the MRU state.
2610 2612 */
2611 2613
2612 2614 ASSERT(buf->b_arc_access == 0);
2613 2615 buf->b_arc_access = ddi_get_lbolt();
2614 2616 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2615 2617 arc_change_state(arc_mru, buf, hash_lock);
2616 2618
2617 2619 } else if (buf->b_state == arc_mru) {
2618 2620 now = ddi_get_lbolt();
2619 2621
2620 2622 /*
2621 2623 * If this buffer is here because of a prefetch, then either:
2622 2624 * - clear the flag if this is a "referencing" read
2623 2625 * (any subsequent access will bump this into the MFU state).
2624 2626 * or
2625 2627 * - move the buffer to the head of the list if this is
2626 2628 * another prefetch (to make it less likely to be evicted).
2627 2629 */
2628 2630 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2629 2631 if (refcount_count(&buf->b_refcnt) == 0) {
2630 2632 ASSERT(list_link_active(&buf->b_arc_node));
2631 2633 } else {
2632 2634 buf->b_flags &= ~ARC_PREFETCH;
2633 2635 ARCSTAT_BUMP(arcstat_mru_hits);
2634 2636 }
2635 2637 buf->b_arc_access = now;
2636 2638 return;
2637 2639 }
2638 2640
2639 2641 /*
2640 2642 * This buffer has been "accessed" only once so far,
2641 2643 * but it is still in the cache. Move it to the MFU
2642 2644 * state.
2643 2645 */
2644 2646 if (now > buf->b_arc_access + ARC_MINTIME) {
2645 2647 /*
2646 2648 * More than 125ms have passed since we
2647 2649 * instantiated this buffer. Move it to the
2648 2650 * most frequently used state.
2649 2651 */
2650 2652 buf->b_arc_access = now;
2651 2653 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2652 2654 arc_change_state(arc_mfu, buf, hash_lock);
2653 2655 }
2654 2656 ARCSTAT_BUMP(arcstat_mru_hits);
2655 2657 } else if (buf->b_state == arc_mru_ghost) {
2656 2658 arc_state_t *new_state;
2657 2659 /*
2658 2660 * This buffer has been "accessed" recently, but
2659 2661 * was evicted from the cache. Move it to the
2660 2662 * MFU state.
2661 2663 */
2662 2664
2663 2665 if (buf->b_flags & ARC_PREFETCH) {
2664 2666 new_state = arc_mru;
2665 2667 if (refcount_count(&buf->b_refcnt) > 0)
2666 2668 buf->b_flags &= ~ARC_PREFETCH;
2667 2669 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2668 2670 } else {
2669 2671 new_state = arc_mfu;
2670 2672 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2671 2673 }
2672 2674
2673 2675 buf->b_arc_access = ddi_get_lbolt();
2674 2676 arc_change_state(new_state, buf, hash_lock);
2675 2677
2676 2678 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2677 2679 } else if (buf->b_state == arc_mfu) {
2678 2680 /*
2679 2681 * This buffer has been accessed more than once and is
2680 2682 * still in the cache. Keep it in the MFU state.
2681 2683 *
2682 2684 * NOTE: an add_reference() that occurred when we did
2683 2685 * the arc_read() will have kicked this off the list.
2684 2686 * If it was a prefetch, we will explicitly move it to
2685 2687 * the head of the list now.
2686 2688 */
2687 2689 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2688 2690 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2689 2691 ASSERT(list_link_active(&buf->b_arc_node));
2690 2692 }
2691 2693 ARCSTAT_BUMP(arcstat_mfu_hits);
2692 2694 buf->b_arc_access = ddi_get_lbolt();
2693 2695 } else if (buf->b_state == arc_mfu_ghost) {
2694 2696 arc_state_t *new_state = arc_mfu;
2695 2697 /*
2696 2698 * This buffer has been accessed more than once but has
2697 2699 * been evicted from the cache. Move it back to the
2698 2700 * MFU state.
2699 2701 */
2700 2702
2701 2703 if (buf->b_flags & ARC_PREFETCH) {
2702 2704 /*
2703 2705 * This is a prefetch access...
2704 2706 * move this block back to the MRU state.
2705 2707 */
2706 2708 ASSERT0(refcount_count(&buf->b_refcnt));
2707 2709 new_state = arc_mru;
2708 2710 }
2709 2711
2710 2712 buf->b_arc_access = ddi_get_lbolt();
2711 2713 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2712 2714 arc_change_state(new_state, buf, hash_lock);
2713 2715
2714 2716 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2715 2717 } else if (buf->b_state == arc_l2c_only) {
2716 2718 /*
2717 2719 * This buffer is on the 2nd Level ARC.
2718 2720 */
2719 2721
2720 2722 buf->b_arc_access = ddi_get_lbolt();
2721 2723 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2722 2724 arc_change_state(arc_mfu, buf, hash_lock);
2723 2725 } else {
2724 2726 ASSERT(!"invalid arc state");
2725 2727 }
2726 2728 }
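
Ignoring the prefetch and L2ARC-only corner cases, the transitions implemented above reduce to a small decision table: anonymous buffers enter the MRU, an MRU buffer is promoted to the MFU only after it has aged past ARC_MINTIME, and anything found in a ghost list (or already in the MFU) lands in the MFU. A deliberately simplified sketch with hypothetical names:

	#include <stdint.h>

	typedef enum {
		STATE_ANON, STATE_MRU, STATE_MRU_GHOST, STATE_MFU, STATE_MFU_GHOST
	} cache_state_t;

	#define	MINTIME	125	/* hypothetical promotion delay, same units as 'now' */

	/*
	 * Hypothetical helper: which state should a buffer move to on access?
	 * Prefetch and L2ARC-only handling from the real routine are omitted.
	 */
	cache_state_t
	next_state(cache_state_t state, uint64_t now, uint64_t last_access)
	{
		switch (state) {
		case STATE_ANON:
			return (STATE_MRU);	/* first insertion goes to the MRU */
		case STATE_MRU:
			/* promote to the MFU only after the buffer has aged */
			return (now > last_access + MINTIME ?
			    STATE_MFU : STATE_MRU);
		default:
			/* ghost hits and repeat MFU hits all land in the MFU */
			return (STATE_MFU);
		}
	}
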
2727 2729
2728 2730 /* a generic arc_done_func_t which you can use */
2729 2731 /* ARGSUSED */
2730 2732 void
2731 2733 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2732 2734 {
2733 2735 if (zio == NULL || zio->io_error == 0)
2734 2736 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2735 2737 VERIFY(arc_buf_remove_ref(buf, arg));
2736 2738 }
2737 2739
2738 2740 /* a generic arc_done_func_t */
2739 2741 void
2740 2742 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2741 2743 {
2742 2744 arc_buf_t **bufp = arg;
2743 2745 if (zio && zio->io_error) {
2744 2746 VERIFY(arc_buf_remove_ref(buf, arg));
2745 2747 *bufp = NULL;
2746 2748 } else {
2747 2749 *bufp = buf;
2748 2750 ASSERT(buf->b_data);
2749 2751 }
2750 2752 }
2751 2753
2752 2754 static void
2753 2755 arc_read_done(zio_t *zio)
2754 2756 {
2755 2757 arc_buf_hdr_t *hdr;
2756 2758 arc_buf_t *buf;
2757 2759 arc_buf_t *abuf; /* buffer we're assigning to callback */
2758 2760 kmutex_t *hash_lock = NULL;
2759 2761 arc_callback_t *callback_list, *acb;
2760 2762 int freeable = FALSE;
2761 2763
2762 2764 buf = zio->io_private;
2763 2765 hdr = buf->b_hdr;
2764 2766
2765 2767 /*
2766 2768 * The hdr was inserted into hash-table and removed from lists
2767 2769 * prior to starting I/O. We should find this header, since
2768 2770 * it's in the hash table, and it should be legit since it's
2769 2771 * not possible to evict it during the I/O. The only possible
2770 2772 * reason for it not to be found is if we were freed during the
2771 2773 * read.
2772 2774 */
2773 2775 if (HDR_IN_HASH_TABLE(hdr)) {
2774 2776 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
2775 2777 ASSERT3U(hdr->b_dva.dva_word[0], ==,
2776 2778 BP_IDENTITY(zio->io_bp)->dva_word[0]);
2777 2779 ASSERT3U(hdr->b_dva.dva_word[1], ==,
2778 2780 BP_IDENTITY(zio->io_bp)->dva_word[1]);
2779 2781
2780 2782 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
2781 2783 &hash_lock);
2782 2784
2783 2785 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
2784 2786 hash_lock == NULL) ||
2785 2787 (found == hdr &&
2786 2788 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2787 2789 (found == hdr && HDR_L2_READING(hdr)));
2788 2790 }
2789 2791
2790 2792 hdr->b_flags &= ~ARC_L2_EVICTED;
2791 2793 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2792 2794 hdr->b_flags &= ~ARC_L2CACHE;
2793 2795
2794 2796 /* byteswap if necessary */
2795 2797 callback_list = hdr->b_acb;
2796 2798 ASSERT(callback_list != NULL);
2797 2799 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2798 2800 dmu_object_byteswap_t bswap =
2799 2801 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2800 2802 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2801 2803 byteswap_uint64_array :
2802 2804 dmu_ot_byteswap[bswap].ob_func;
2803 2805 func(buf->b_data, hdr->b_size);
2804 2806 }
2805 2807
2806 2808 arc_cksum_compute(buf, B_FALSE);
2807 2809 arc_buf_watch(buf);
2808 2810
2809 2811 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2810 2812 /*
2811 2813 * Only call arc_access on anonymous buffers. This is because
2812 2814 * if we've issued an I/O for an evicted buffer, we've already
2813 2815 * called arc_access (to prevent any simultaneous readers from
2814 2816 * getting confused).
2815 2817 */
2816 2818 arc_access(hdr, hash_lock);
2817 2819 }
2818 2820
2819 2821 /* create copies of the data buffer for the callers */
2820 2822 abuf = buf;
2821 2823 for (acb = callback_list; acb; acb = acb->acb_next) {
2822 2824 if (acb->acb_done) {
2823 2825 if (abuf == NULL) {
2824 2826 ARCSTAT_BUMP(arcstat_duplicate_reads);
2825 2827 abuf = arc_buf_clone(buf);
2826 2828 }
2827 2829 acb->acb_buf = abuf;
2828 2830 abuf = NULL;
2829 2831 }
2830 2832 }
2831 2833 hdr->b_acb = NULL;
2832 2834 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2833 2835 ASSERT(!HDR_BUF_AVAILABLE(hdr));
2834 2836 if (abuf == buf) {
2835 2837 ASSERT(buf->b_efunc == NULL);
2836 2838 ASSERT(hdr->b_datacnt == 1);
2837 2839 hdr->b_flags |= ARC_BUF_AVAILABLE;
2838 2840 }
2839 2841
2840 2842 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2841 2843
2842 2844 if (zio->io_error != 0) {
2843 2845 hdr->b_flags |= ARC_IO_ERROR;
2844 2846 if (hdr->b_state != arc_anon)
2845 2847 arc_change_state(arc_anon, hdr, hash_lock);
2846 2848 if (HDR_IN_HASH_TABLE(hdr))
2847 2849 buf_hash_remove(hdr);
2848 2850 freeable = refcount_is_zero(&hdr->b_refcnt);
2849 2851 }
2850 2852
2851 2853 /*
2852 2854 * Broadcast before we drop the hash_lock to avoid the possibility
2853 2855 * that the hdr (and hence the cv) might be freed before we get to
2854 2856 * the cv_broadcast().
2855 2857 */
2856 2858 cv_broadcast(&hdr->b_cv);
2857 2859
2858 2860 if (hash_lock) {
2859 2861 mutex_exit(hash_lock);
2860 2862 } else {
2861 2863 /*
2862 2864 * This block was freed while we waited for the read to
2863 2865 * complete. It has been removed from the hash table and
2864 2866 * moved to the anonymous state (so that it won't show up
2865 2867 * in the cache).
2866 2868 */
2867 2869 ASSERT3P(hdr->b_state, ==, arc_anon);
2868 2870 freeable = refcount_is_zero(&hdr->b_refcnt);
2869 2871 }
2870 2872
2871 2873 /* execute each callback and free its structure */
2872 2874 while ((acb = callback_list) != NULL) {
2873 2875 if (acb->acb_done)
2874 2876 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2875 2877
2876 2878 if (acb->acb_zio_dummy != NULL) {
2877 2879 acb->acb_zio_dummy->io_error = zio->io_error;
2878 2880 zio_nowait(acb->acb_zio_dummy);
2879 2881 }
2880 2882
2881 2883 callback_list = acb->acb_next;
2882 2884 kmem_free(acb, sizeof (arc_callback_t));
2883 2885 }
2884 2886
2885 2887 if (freeable)
2886 2888 arc_hdr_destroy(hdr);
2887 2889 }
2888 2890
2889 2891 /*
2890 2892 * "Read" the block at the specified DVA (in bp) via the
2891 2893 * cache. If the block is found in the cache, invoke the provided
2892 2894 * callback immediately and return. Note that the `zio' parameter
2893 2895 * in the callback will be NULL in this case, since no IO was
2894 2896 * required. If the block is not in the cache pass the read request
2895 2897 * on to the spa with a substitute callback function, so that the
2896 2898 * requested block will be added to the cache.
2897 2899 *
2898 2900 * If a read request arrives for a block that has a read in-progress,
2899 2901 * either wait for the in-progress read to complete (and return the
2900 2902 * results); or, if this is a read with a "done" func, add a record
2901 2903 * to the read to invoke the "done" func when the read completes,
2902 2904 * and return; or just return.
2903 2905 *
2904 2906 * arc_read_done() will invoke all the requested "done" functions
2905 2907 * for readers of this block.
2906 2908 */
2907 2909 int
2908 2910 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
2909 2911 void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
2910 2912 const zbookmark_phys_t *zb)
2911 2913 {
2912 2914 arc_buf_hdr_t *hdr = NULL;
2913 2915 arc_buf_t *buf = NULL;
2914 2916 kmutex_t *hash_lock = NULL;
2915 2917 zio_t *rzio;
2916 2918 uint64_t guid = spa_load_guid(spa);
2917 2919
2918 2920 ASSERT(!BP_IS_EMBEDDED(bp) ||
2919 2921 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
2920 2922
2921 2923 top:
2922 2924 if (!BP_IS_EMBEDDED(bp)) {
2923 2925 /*
2924 2926 * Embedded BP's have no DVA and require no I/O to "read".
2925 2927 * Create an anonymous arc buf to back it.
2926 2928 */
2927 2929 hdr = buf_hash_find(guid, bp, &hash_lock);
2928 2930 }
2929 2931
2930 2932 if (hdr != NULL && hdr->b_datacnt > 0) {
2931 2933
2932 2934 *arc_flags |= ARC_CACHED;
2933 2935
2934 2936 if (HDR_IO_IN_PROGRESS(hdr)) {
2935 2937
2936 2938 if (*arc_flags & ARC_WAIT) {
2937 2939 cv_wait(&hdr->b_cv, hash_lock);
2938 2940 mutex_exit(hash_lock);
2939 2941 goto top;
2940 2942 }
2941 2943 ASSERT(*arc_flags & ARC_NOWAIT);
2942 2944
2943 2945 if (done) {
2944 2946 arc_callback_t *acb = NULL;
2945 2947
2946 2948 acb = kmem_zalloc(sizeof (arc_callback_t),
2947 2949 KM_SLEEP);
2948 2950 acb->acb_done = done;
2949 2951 acb->acb_private = private;
2950 2952 if (pio != NULL)
2951 2953 acb->acb_zio_dummy = zio_null(pio,
2952 2954 spa, NULL, NULL, NULL, zio_flags);
2953 2955
2954 2956 ASSERT(acb->acb_done != NULL);
2955 2957 acb->acb_next = hdr->b_acb;
2956 2958 hdr->b_acb = acb;
2957 2959 add_reference(hdr, hash_lock, private);
2958 2960 mutex_exit(hash_lock);
2959 2961 return (0);
2960 2962 }
2961 2963 mutex_exit(hash_lock);
2962 2964 return (0);
2963 2965 }
2964 2966
2965 2967 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2966 2968
2967 2969 if (done) {
2968 2970 add_reference(hdr, hash_lock, private);
2969 2971 /*
2970 2972 * If this block is already in use, create a new
2971 2973 * copy of the data so that we will be guaranteed
2972 2974 * that arc_release() will always succeed.
2973 2975 */
2974 2976 buf = hdr->b_buf;
2975 2977 ASSERT(buf);
2976 2978 ASSERT(buf->b_data);
2977 2979 if (HDR_BUF_AVAILABLE(hdr)) {
2978 2980 ASSERT(buf->b_efunc == NULL);
2979 2981 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2980 2982 } else {
2981 2983 buf = arc_buf_clone(buf);
2982 2984 }
2983 2985
2984 2986 } else if (*arc_flags & ARC_PREFETCH &&
2985 2987 refcount_count(&hdr->b_refcnt) == 0) {
2986 2988 hdr->b_flags |= ARC_PREFETCH;
2987 2989 }
2988 2990 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2989 2991 arc_access(hdr, hash_lock);
2990 2992 if (*arc_flags & ARC_L2CACHE)
2991 2993 hdr->b_flags |= ARC_L2CACHE;
2992 2994 if (*arc_flags & ARC_L2COMPRESS)
2993 2995 hdr->b_flags |= ARC_L2COMPRESS;
2994 2996 mutex_exit(hash_lock);
2995 2997 ARCSTAT_BUMP(arcstat_hits);
2996 2998 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2997 2999 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2998 3000 data, metadata, hits);
2999 3001
3000 3002 if (done)
3001 3003 done(NULL, buf, private);
3002 3004 } else {
3003 3005 uint64_t size = BP_GET_LSIZE(bp);
3004 3006 arc_callback_t *acb;
3005 3007 vdev_t *vd = NULL;
3006 3008 uint64_t addr = 0;
3007 3009 boolean_t devw = B_FALSE;
3008 3010 enum zio_compress b_compress = ZIO_COMPRESS_OFF;
3009 3011 uint64_t b_asize = 0;
3010 3012
3011 3013 if (hdr == NULL) {
3012 3014 /* this block is not in the cache */
3013 3015 arc_buf_hdr_t *exists = NULL;
3014 3016 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
3015 3017 buf = arc_buf_alloc(spa, size, private, type);
3016 3018 hdr = buf->b_hdr;
3017 3019 if (!BP_IS_EMBEDDED(bp)) {
3018 3020 hdr->b_dva = *BP_IDENTITY(bp);
3019 3021 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
3020 3022 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
3021 3023 exists = buf_hash_insert(hdr, &hash_lock);
3022 3024 }
3023 3025 if (exists != NULL) {
3024 3026 /* somebody beat us to the hash insert */
3025 3027 mutex_exit(hash_lock);
3026 3028 buf_discard_identity(hdr);
3027 3029 (void) arc_buf_remove_ref(buf, private);
3028 3030 goto top; /* restart the IO request */
3029 3031 }
3030 3032 /* if this is a prefetch, we don't have a reference */
3031 3033 if (*arc_flags & ARC_PREFETCH) {
3032 3034 (void) remove_reference(hdr, hash_lock,
3033 3035 private);
3034 3036 hdr->b_flags |= ARC_PREFETCH;
3035 3037 }
3036 3038 if (*arc_flags & ARC_L2CACHE)
3037 3039 hdr->b_flags |= ARC_L2CACHE;
3038 3040 if (*arc_flags & ARC_L2COMPRESS)
3039 3041 hdr->b_flags |= ARC_L2COMPRESS;
3040 3042 if (BP_GET_LEVEL(bp) > 0)
3041 3043 hdr->b_flags |= ARC_INDIRECT;
3042 3044 } else {
3043 3045 /* this block is in the ghost cache */
3044 3046 ASSERT(GHOST_STATE(hdr->b_state));
3045 3047 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3046 3048 ASSERT0(refcount_count(&hdr->b_refcnt));
3047 3049 ASSERT(hdr->b_buf == NULL);
3048 3050
3049 3051 /* if this is a prefetch, we don't have a reference */
3050 3052 if (*arc_flags & ARC_PREFETCH)
3051 3053 hdr->b_flags |= ARC_PREFETCH;
3052 3054 else
3053 3055 add_reference(hdr, hash_lock, private);
3054 3056 if (*arc_flags & ARC_L2CACHE)
3055 3057 hdr->b_flags |= ARC_L2CACHE;
3056 3058 if (*arc_flags & ARC_L2COMPRESS)
3057 3059 hdr->b_flags |= ARC_L2COMPRESS;
3058 3060 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3059 3061 buf->b_hdr = hdr;
3060 3062 buf->b_data = NULL;
3061 3063 buf->b_efunc = NULL;
3062 3064 buf->b_private = NULL;
3063 3065 buf->b_next = NULL;
3064 3066 hdr->b_buf = buf;
3065 3067 ASSERT(hdr->b_datacnt == 0);
3066 3068 hdr->b_datacnt = 1;
3067 3069 arc_get_data_buf(buf);
3068 3070 arc_access(hdr, hash_lock);
3069 3071 }
3070 3072
3071 3073 ASSERT(!GHOST_STATE(hdr->b_state));
3072 3074
3073 3075 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3074 3076 acb->acb_done = done;
3075 3077 acb->acb_private = private;
3076 3078
3077 3079 ASSERT(hdr->b_acb == NULL);
3078 3080 hdr->b_acb = acb;
3079 3081 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3080 3082
3081 3083 if (hdr->b_l2hdr != NULL &&
3082 3084 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3083 3085 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3084 3086 addr = hdr->b_l2hdr->b_daddr;
3085 3087 b_compress = hdr->b_l2hdr->b_compress;
3086 3088 b_asize = hdr->b_l2hdr->b_asize;
3087 3089 /*
3088 3090 * Lock out device removal.
3089 3091 */
3090 3092 if (vdev_is_dead(vd) ||
3091 3093 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3092 3094 vd = NULL;
3093 3095 }
3094 3096
3095 3097 if (hash_lock != NULL)
3096 3098 mutex_exit(hash_lock);
3097 3099
3098 3100 /*
3099 3101 * At this point, we have a level 1 cache miss. Try again in
3100 3102 * L2ARC if possible.
3101 3103 */
3102 3104 ASSERT3U(hdr->b_size, ==, size);
3103 3105 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3104 3106 uint64_t, size, zbookmark_phys_t *, zb);
3105 3107 ARCSTAT_BUMP(arcstat_misses);
3106 3108 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3107 3109 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3108 3110 data, metadata, misses);
3109 3111
3110 3112 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3111 3113 /*
3112 3114 * Read from the L2ARC if the following are true:
3113 3115 * 1. The L2ARC vdev was previously cached.
3114 3116 * 2. This buffer still has L2ARC metadata.
3115 3117 * 3. This buffer isn't currently writing to the L2ARC.
3116 3118 * 4. The L2ARC entry wasn't evicted, which may
3117 3119 * also have invalidated the vdev.
3118 3120 * 5. This isn't a prefetch or l2arc_noprefetch isn't set.
3119 3121 */
3120 3122 if (hdr->b_l2hdr != NULL &&
3121 3123 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3122 3124 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3123 3125 l2arc_read_callback_t *cb;
3124 3126
3125 3127 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3126 3128 ARCSTAT_BUMP(arcstat_l2_hits);
3127 3129
3128 3130 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3129 3131 KM_SLEEP);
3130 3132 cb->l2rcb_buf = buf;
3131 3133 cb->l2rcb_spa = spa;
3132 3134 cb->l2rcb_bp = *bp;
3133 3135 cb->l2rcb_zb = *zb;
3134 3136 cb->l2rcb_flags = zio_flags;
3135 3137 cb->l2rcb_compress = b_compress;
3136 3138
3137 3139 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3138 3140 addr + size < vd->vdev_psize -
3139 3141 VDEV_LABEL_END_SIZE);
3140 3142
3141 3143 /*
3142 3144 * l2arc read. The SCL_L2ARC lock will be
3143 3145 * released by l2arc_read_done().
3144 3146 * Issue a null zio if the underlying buffer
3145 3147 * was squashed to zero size by compression.
3146 3148 */
3147 3149 if (b_compress == ZIO_COMPRESS_EMPTY) {
3148 3150 rzio = zio_null(pio, spa, vd,
3149 3151 l2arc_read_done, cb,
3150 3152 zio_flags | ZIO_FLAG_DONT_CACHE |
3151 3153 ZIO_FLAG_CANFAIL |
3152 3154 ZIO_FLAG_DONT_PROPAGATE |
3153 3155 ZIO_FLAG_DONT_RETRY);
3154 3156 } else {
3155 3157 rzio = zio_read_phys(pio, vd, addr,
3156 3158 b_asize, buf->b_data,
3157 3159 ZIO_CHECKSUM_OFF,
3158 3160 l2arc_read_done, cb, priority,
3159 3161 zio_flags | ZIO_FLAG_DONT_CACHE |
3160 3162 ZIO_FLAG_CANFAIL |
3161 3163 ZIO_FLAG_DONT_PROPAGATE |
3162 3164 ZIO_FLAG_DONT_RETRY, B_FALSE);
3163 3165 }
3164 3166 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3165 3167 zio_t *, rzio);
3166 3168 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
3167 3169
3168 3170 if (*arc_flags & ARC_NOWAIT) {
3169 3171 zio_nowait(rzio);
3170 3172 return (0);
3171 3173 }
3172 3174
3173 3175 ASSERT(*arc_flags & ARC_WAIT);
3174 3176 if (zio_wait(rzio) == 0)
3175 3177 return (0);
3176 3178
3177 3179 /* l2arc read error; goto zio_read() */
3178 3180 } else {
3179 3181 DTRACE_PROBE1(l2arc__miss,
3180 3182 arc_buf_hdr_t *, hdr);
3181 3183 ARCSTAT_BUMP(arcstat_l2_misses);
3182 3184 if (HDR_L2_WRITING(hdr))
3183 3185 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3184 3186 spa_config_exit(spa, SCL_L2ARC, vd);
3185 3187 }
3186 3188 } else {
3187 3189 if (vd != NULL)
3188 3190 spa_config_exit(spa, SCL_L2ARC, vd);
3189 3191 if (l2arc_ndev != 0) {
3190 3192 DTRACE_PROBE1(l2arc__miss,
3191 3193 arc_buf_hdr_t *, hdr);
3192 3194 ARCSTAT_BUMP(arcstat_l2_misses);
3193 3195 }
3194 3196 }
3195 3197
3196 3198 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3197 3199 arc_read_done, buf, priority, zio_flags, zb);
3198 3200
3199 3201 if (*arc_flags & ARC_WAIT)
3200 3202 return (zio_wait(rzio));
3201 3203
3202 3204 ASSERT(*arc_flags & ARC_NOWAIT);
3203 3205 zio_nowait(rzio);
3204 3206 }
3205 3207 return (0);
3206 3208 }
3207 3209
3208 3210 void
3209 3211 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3210 3212 {
3211 3213 ASSERT(buf->b_hdr != NULL);
3212 3214 ASSERT(buf->b_hdr->b_state != arc_anon);
3213 3215 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3214 3216 ASSERT(buf->b_efunc == NULL);
3215 3217 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3216 3218
3217 3219 buf->b_efunc = func;
3218 3220 buf->b_private = private;
3219 3221 }
3220 3222
3221 3223 /*
3222 3224 * Notify the arc that a block was freed, and thus will never be used again.
3223 3225 */
3224 3226 void
3225 3227 arc_freed(spa_t *spa, const blkptr_t *bp)
3226 3228 {
3227 3229 arc_buf_hdr_t *hdr;
3228 3230 kmutex_t *hash_lock;
3229 3231 uint64_t guid = spa_load_guid(spa);
3230 3232
3231 3233 ASSERT(!BP_IS_EMBEDDED(bp));
3232 3234
3233 3235 hdr = buf_hash_find(guid, bp, &hash_lock);
3234 3236 if (hdr == NULL)
3235 3237 return;
3236 3238 if (HDR_BUF_AVAILABLE(hdr)) {
3237 3239 arc_buf_t *buf = hdr->b_buf;
3238 3240 add_reference(hdr, hash_lock, FTAG);
3239 3241 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3240 3242 mutex_exit(hash_lock);
3241 3243
3242 3244 arc_release(buf, FTAG);
3243 3245 (void) arc_buf_remove_ref(buf, FTAG);
3244 3246 } else {
3245 3247 mutex_exit(hash_lock);
3246 3248 }
3247 3249
3248 3250 }
3249 3251
3250 3252 /*
3251 3253 * Clear the user eviction callback set by arc_set_callback(), first calling
3252 3254 * it if it exists. Because the presence of a callback keeps an arc_buf cached,
3253 3255 * clearing the callback may result in the arc_buf being destroyed. However,
3254 3256 * it will not result in the *last* arc_buf being destroyed, hence the data
3255 3257 * will remain cached in the ARC. We make a copy of the arc buffer here so
3256 3258 * that we can process the callback without holding any locks.
3257 3259 *
3258 3260 * It's possible that the callback is already in the process of being cleared
3259 3261 * by another thread. In this case we can not clear the callback.
3260 3262 *
3261 3263 * Returns B_TRUE if the callback was successfully called and cleared.
3262 3264 */
3263 3265 boolean_t
3264 3266 arc_clear_callback(arc_buf_t *buf)
3265 3267 {
3266 3268 arc_buf_hdr_t *hdr;
3267 3269 kmutex_t *hash_lock;
3268 3270 arc_evict_func_t *efunc = buf->b_efunc;
3269 3271 void *private = buf->b_private;
3270 3272
3271 3273 mutex_enter(&buf->b_evict_lock);
3272 3274 hdr = buf->b_hdr;
3273 3275 if (hdr == NULL) {
3274 3276 /*
3275 3277 * We are in arc_do_user_evicts().
3276 3278 */
3277 3279 ASSERT(buf->b_data == NULL);
3278 3280 mutex_exit(&buf->b_evict_lock);
3279 3281 return (B_FALSE);
3280 3282 } else if (buf->b_data == NULL) {
3281 3283 /*
3282 3284 * We are on the eviction list; process this buffer now
3283 3285 * but let arc_do_user_evicts() do the reaping.
3284 3286 */
3285 3287 buf->b_efunc = NULL;
3286 3288 mutex_exit(&buf->b_evict_lock);
3287 3289 VERIFY0(efunc(private));
3288 3290 return (B_TRUE);
3289 3291 }
3290 3292 hash_lock = HDR_LOCK(hdr);
3291 3293 mutex_enter(hash_lock);
3292 3294 hdr = buf->b_hdr;
3293 3295 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3294 3296
3295 3297 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3296 3298 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3297 3299
3298 3300 buf->b_efunc = NULL;
3299 3301 buf->b_private = NULL;
3300 3302
3301 3303 if (hdr->b_datacnt > 1) {
3302 3304 mutex_exit(&buf->b_evict_lock);
3303 3305 arc_buf_destroy(buf, FALSE, TRUE);
3304 3306 } else {
3305 3307 ASSERT(buf == hdr->b_buf);
3306 3308 hdr->b_flags |= ARC_BUF_AVAILABLE;
3307 3309 mutex_exit(&buf->b_evict_lock);
3308 3310 }
3309 3311
3310 3312 mutex_exit(hash_lock);
3311 3313 VERIFY0(efunc(private));
3312 3314 return (B_TRUE);
3313 3315 }
3314 3316
3315 3317 /*
3316 3318 * Release this buffer from the cache, making it an anonymous buffer. This
3317 3319 * must be done after a read and prior to modifying the buffer contents.
3318 3320 * If the buffer has more than one reference, we must make
3319 3321 * a new hdr for the buffer.
3320 3322 */
3321 3323 void
3322 3324 arc_release(arc_buf_t *buf, void *tag)
3323 3325 {
3324 3326 arc_buf_hdr_t *hdr;
3325 3327 kmutex_t *hash_lock = NULL;
3326 3328 l2arc_buf_hdr_t *l2hdr;
3327 3329 uint64_t buf_size;
3328 3330
3329 3331 /*
3330 3332 * It would be nice to assert that if it's DMU metadata (level >
3331 3333 * 0 || it's the dnode file), then it must be syncing context.
3332 3334 * But we don't know that information at this level.
3333 3335 */
3334 3336
3335 3337 mutex_enter(&buf->b_evict_lock);
3336 3338 hdr = buf->b_hdr;
3337 3339
3338 3340 /* this buffer is not on any list */
3339 3341 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3340 3342
3341 3343 if (hdr->b_state == arc_anon) {
3342 3344 /* this buffer is already released */
3343 3345 ASSERT(buf->b_efunc == NULL);
3344 3346 } else {
3345 3347 hash_lock = HDR_LOCK(hdr);
3346 3348 mutex_enter(hash_lock);
3347 3349 hdr = buf->b_hdr;
3348 3350 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3349 3351 }
3350 3352
3351 3353 l2hdr = hdr->b_l2hdr;
3352 3354 if (l2hdr) {
3353 3355 mutex_enter(&l2arc_buflist_mtx);
3354 3356 hdr->b_l2hdr = NULL;
3355 3357 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3356 3358 }
3357 3359 buf_size = hdr->b_size;
3358 3360
3359 3361 /*
3360 3362 * Do we have more than one buf?
3361 3363 */
3362 3364 if (hdr->b_datacnt > 1) {
3363 3365 arc_buf_hdr_t *nhdr;
3364 3366 arc_buf_t **bufp;
3365 3367 uint64_t blksz = hdr->b_size;
3366 3368 uint64_t spa = hdr->b_spa;
3367 3369 arc_buf_contents_t type = hdr->b_type;
3368 3370 uint32_t flags = hdr->b_flags;
3369 3371
3370 3372 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3371 3373 /*
3372 3374 * Pull the data off of this hdr and attach it to
3373 3375 * a new anonymous hdr.
3374 3376 */
3375 3377 (void) remove_reference(hdr, hash_lock, tag);
3376 3378 bufp = &hdr->b_buf;
3377 3379 while (*bufp != buf)
3378 3380 bufp = &(*bufp)->b_next;
3379 3381 *bufp = buf->b_next;
3380 3382 buf->b_next = NULL;
3381 3383
3382 3384 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3383 3385 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3384 3386 if (refcount_is_zero(&hdr->b_refcnt)) {
3385 3387 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3386 3388 ASSERT3U(*size, >=, hdr->b_size);
3387 3389 atomic_add_64(size, -hdr->b_size);
3388 3390 }
3389 3391
3390 3392 /*
3391 3393 * We're releasing a duplicate user data buffer, update
3392 3394 * our statistics accordingly.
3393 3395 */
3394 3396 if (hdr->b_type == ARC_BUFC_DATA) {
3395 3397 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3396 3398 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3397 3399 -hdr->b_size);
3398 3400 }
3399 3401 hdr->b_datacnt -= 1;
3400 3402 arc_cksum_verify(buf);
3401 3403 arc_buf_unwatch(buf);
3402 3404
3403 3405 mutex_exit(hash_lock);
3404 3406
3405 3407 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3406 3408 nhdr->b_size = blksz;
3407 3409 nhdr->b_spa = spa;
3408 3410 nhdr->b_type = type;
3409 3411 nhdr->b_buf = buf;
3410 3412 nhdr->b_state = arc_anon;
3411 3413 nhdr->b_arc_access = 0;
3412 3414 nhdr->b_flags = flags & ARC_L2_WRITING;
3413 3415 nhdr->b_l2hdr = NULL;
3414 3416 nhdr->b_datacnt = 1;
3415 3417 nhdr->b_freeze_cksum = NULL;
3416 3418 (void) refcount_add(&nhdr->b_refcnt, tag);
3417 3419 buf->b_hdr = nhdr;
3418 3420 mutex_exit(&buf->b_evict_lock);
3419 3421 atomic_add_64(&arc_anon->arcs_size, blksz);
3420 3422 } else {
3421 3423 mutex_exit(&buf->b_evict_lock);
3422 3424 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3423 3425 ASSERT(!list_link_active(&hdr->b_arc_node));
3424 3426 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3425 3427 if (hdr->b_state != arc_anon)
3426 3428 arc_change_state(arc_anon, hdr, hash_lock);
3427 3429 hdr->b_arc_access = 0;
3428 3430 if (hash_lock)
3429 3431 mutex_exit(hash_lock);
3430 3432
3431 3433 buf_discard_identity(hdr);
3432 3434 arc_buf_thaw(buf);
3433 3435 }
3434 3436 buf->b_efunc = NULL;
3435 3437 buf->b_private = NULL;
3436 3438
3437 3439 if (l2hdr) {
3438 3440 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3439 3441 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
3440 3442 -l2hdr->b_asize, 0, 0);
3441 3443 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3442 3444 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3443 3445 mutex_exit(&l2arc_buflist_mtx);
3444 3446 }
3445 3447 }
3446 3448
3447 3449 int
3448 3450 arc_released(arc_buf_t *buf)
3449 3451 {
3450 3452 int released;
3451 3453
3452 3454 mutex_enter(&buf->b_evict_lock);
3453 3455 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3454 3456 mutex_exit(&buf->b_evict_lock);
3455 3457 return (released);
3456 3458 }
3457 3459
3458 3460 #ifdef ZFS_DEBUG
3459 3461 int
3460 3462 arc_referenced(arc_buf_t *buf)
3461 3463 {
3462 3464 int referenced;
3463 3465
3464 3466 mutex_enter(&buf->b_evict_lock);
3465 3467 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3466 3468 mutex_exit(&buf->b_evict_lock);
3467 3469 return (referenced);
3468 3470 }
3469 3471 #endif
3470 3472
3471 3473 static void
3472 3474 arc_write_ready(zio_t *zio)
3473 3475 {
3474 3476 arc_write_callback_t *callback = zio->io_private;
3475 3477 arc_buf_t *buf = callback->awcb_buf;
3476 3478 arc_buf_hdr_t *hdr = buf->b_hdr;
3477 3479
3478 3480 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3479 3481 callback->awcb_ready(zio, buf, callback->awcb_private);
3480 3482
3481 3483 /*
3482 3484 * If the IO is already in progress, then this is a re-write
3483 3485 * attempt, so we need to thaw and re-compute the cksum.
3484 3486 * It is the responsibility of the callback to handle the
3485 3487 * accounting for any re-write attempt.
3486 3488 */
3487 3489 if (HDR_IO_IN_PROGRESS(hdr)) {
3488 3490 mutex_enter(&hdr->b_freeze_lock);
3489 3491 if (hdr->b_freeze_cksum != NULL) {
3490 3492 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3491 3493 hdr->b_freeze_cksum = NULL;
3492 3494 }
3493 3495 mutex_exit(&hdr->b_freeze_lock);
3494 3496 }
3495 3497 arc_cksum_compute(buf, B_FALSE);
3496 3498 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3497 3499 }
3498 3500
3499 3501 /*
3500 3502 * The SPA calls this callback for each physical write that happens on behalf
3501 3503 * of a logical write. See the comment in dbuf_write_physdone() for details.
3502 3504 */
3503 3505 static void
3504 3506 arc_write_physdone(zio_t *zio)
3505 3507 {
3506 3508 arc_write_callback_t *cb = zio->io_private;
3507 3509 if (cb->awcb_physdone != NULL)
3508 3510 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
3509 3511 }
3510 3512
3511 3513 static void
3512 3514 arc_write_done(zio_t *zio)
3513 3515 {
3514 3516 arc_write_callback_t *callback = zio->io_private;
3515 3517 arc_buf_t *buf = callback->awcb_buf;
3516 3518 arc_buf_hdr_t *hdr = buf->b_hdr;
3517 3519
3518 3520 ASSERT(hdr->b_acb == NULL);
3519 3521
3520 3522 if (zio->io_error == 0) {
3521 3523 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
3522 3524 buf_discard_identity(hdr);
3523 3525 } else {
3524 3526 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3525 3527 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3526 3528 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3527 3529 }
3528 3530 } else {
3529 3531 ASSERT(BUF_EMPTY(hdr));
3530 3532 }
3531 3533
3532 3534 /*
3533 3535 * If the block to be written was all-zero or compressed enough to be
3534 3536 * embedded in the BP, no write was performed so there will be no
3535 3537 * dva/birth/checksum. The buffer must therefore remain anonymous
3536 3538 * (and uncached).
3537 3539 */
3538 3540 if (!BUF_EMPTY(hdr)) {
3539 3541 arc_buf_hdr_t *exists;
3540 3542 kmutex_t *hash_lock;
3541 3543
3542 3544 ASSERT(zio->io_error == 0);
3543 3545
3544 3546 arc_cksum_verify(buf);
3545 3547
3546 3548 exists = buf_hash_insert(hdr, &hash_lock);
3547 3549 if (exists) {
3548 3550 /*
3549 3551 * This can only happen if we overwrite for
3550 3552 * sync-to-convergence, because we remove
3551 3553 * buffers from the hash table when we arc_free().
3552 3554 */
3553 3555 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3554 3556 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3555 3557 panic("bad overwrite, hdr=%p exists=%p",
3556 3558 (void *)hdr, (void *)exists);
3557 3559 ASSERT(refcount_is_zero(&exists->b_refcnt));
3558 3560 arc_change_state(arc_anon, exists, hash_lock);
3559 3561 mutex_exit(hash_lock);
3560 3562 arc_hdr_destroy(exists);
3561 3563 exists = buf_hash_insert(hdr, &hash_lock);
3562 3564 ASSERT3P(exists, ==, NULL);
3563 3565 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3564 3566 /* nopwrite */
3565 3567 ASSERT(zio->io_prop.zp_nopwrite);
3566 3568 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3567 3569 panic("bad nopwrite, hdr=%p exists=%p",
3568 3570 (void *)hdr, (void *)exists);
3569 3571 } else {
3570 3572 /* Dedup */
3571 3573 ASSERT(hdr->b_datacnt == 1);
3572 3574 ASSERT(hdr->b_state == arc_anon);
3573 3575 ASSERT(BP_GET_DEDUP(zio->io_bp));
3574 3576 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3575 3577 }
3576 3578 }
3577 3579 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3578 3580 /* if it's not anon, we are doing a scrub */
3579 3581 if (!exists && hdr->b_state == arc_anon)
3580 3582 arc_access(hdr, hash_lock);
3581 3583 mutex_exit(hash_lock);
3582 3584 } else {
3583 3585 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3584 3586 }
3585 3587
3586 3588 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3587 3589 callback->awcb_done(zio, buf, callback->awcb_private);
3588 3590
3589 3591 kmem_free(callback, sizeof (arc_write_callback_t));
3590 3592 }
3591 3593
3592 3594 zio_t *
3593 3595 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3594 3596 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3595 3597 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
3596 3598 arc_done_func_t *done, void *private, zio_priority_t priority,
3597 3599 int zio_flags, const zbookmark_phys_t *zb)
3598 3600 {
3599 3601 arc_buf_hdr_t *hdr = buf->b_hdr;
3600 3602 arc_write_callback_t *callback;
3601 3603 zio_t *zio;
3602 3604
3603 3605 ASSERT(ready != NULL);
3604 3606 ASSERT(done != NULL);
3605 3607 ASSERT(!HDR_IO_ERROR(hdr));
3606 3608 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3607 3609 ASSERT(hdr->b_acb == NULL);
3608 3610 if (l2arc)
3609 3611 hdr->b_flags |= ARC_L2CACHE;
3610 3612 if (l2arc_compress)
3611 3613 hdr->b_flags |= ARC_L2COMPRESS;
3612 3614 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3613 3615 callback->awcb_ready = ready;
3614 3616 callback->awcb_physdone = physdone;
3615 3617 callback->awcb_done = done;
3616 3618 callback->awcb_private = private;
3617 3619 callback->awcb_buf = buf;
3618 3620
3619 3621 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3620 3622 arc_write_ready, arc_write_physdone, arc_write_done, callback,
3621 3623 priority, zio_flags, zb);
3622 3624
3623 3625 return (zio);
3624 3626 }
3625 3627
3626 3628 static int
3627 3629 arc_memory_throttle(uint64_t reserve, uint64_t txg)
3628 3630 {
3629 3631 #ifdef _KERNEL
3630 3632 uint64_t available_memory = ptob(freemem);
3631 3633 static uint64_t page_load = 0;
3632 3634 static uint64_t last_txg = 0;
3633 3635
3634 3636 #if defined(__i386)
3635 3637 available_memory =
3636 3638 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3637 3639 #endif
3638 3640
3639 3641 if (freemem > physmem * arc_lotsfree_percent / 100)
3640 3642 return (0);
3641 3643
3642 3644 if (txg > last_txg) {
3643 3645 last_txg = txg;
3644 3646 page_load = 0;
3645 3647 }
3646 3648 /*
3647 3649 * If we are in pageout, we know that memory is already tight and
3648 3650 * the ARC is already going to be evicting, so we just want to
3649 3651 * continue to let page writes occur as quickly as possible.
3650 3652 */
3651 3653 if (curproc == proc_pageout) {
3652 3654 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3653 3655 return (SET_ERROR(ERESTART));
3654 3656 /* Note: reserve is inflated, so we deflate */
3655 3657 page_load += reserve / 8;
3656 3658 return (0);
3657 3659 } else if (page_load > 0 && arc_reclaim_needed()) {
3658 3660 /* memory is low, delay before restarting */
3659 3661 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3660 3662 return (SET_ERROR(EAGAIN));
3661 3663 }
3662 3664 page_load = 0;
3663 3665 #endif
3664 3666 return (0);
3665 3667 }
3666 3668
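A minimal user-space sketch of the decision in arc_memory_throttle() above, assuming a 10% arc_lotsfree_percent and made-up memory figures; none of the constants below come from this change, they only illustrate the three outcomes (admit, admit-but-charge the pageout budget, back off).

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's return values. */
#define THR_OK		0
#define THR_ERESTART	1
#define THR_EAGAIN	2

/*
 * Sketch of the throttle decision: plenty of free memory admits the
 * reservation outright; the pageout thread is admitted until a
 * page_load budget is spent; otherwise, once memory is low and some
 * load has already been admitted, ask the caller to retry later.
 */
static int
throttle_decision(uint64_t freemem, uint64_t physmem, uint64_t minfree,
    uint64_t reserve, uint64_t *page_load, int in_pageout, int reclaim_needed)
{
	uint64_t lotsfree_percent = 10;	/* assumed arc_lotsfree_percent */

	if (freemem > physmem * lotsfree_percent / 100)
		return (THR_OK);

	if (in_pageout) {
		uint64_t cap = (minfree > freemem ? minfree : freemem) / 4;
		if (*page_load > cap)
			return (THR_ERESTART);
		*page_load += reserve / 8;	/* reserve is inflated */
		return (THR_OK);
	}
	if (*page_load > 0 && reclaim_needed)
		return (THR_EAGAIN);
	*page_load = 0;
	return (THR_OK);
}

int
main(void)
{
	uint64_t page_load = 0;
	uint64_t freemem = 512ULL << 20;	/* 512 MB free */
	uint64_t physmem = 16ULL << 30;		/* on a 16 GB box */
	uint64_t minfree = 64ULL << 20;
	uint64_t reserve = 1ULL << 20;

	/* Pageout context: admitted, but charged against page_load. */
	printf("%d\n", throttle_decision(freemem, physmem, minfree,
	    reserve, &page_load, 1, 1));
	/* Ordinary writer while memory stays low: told to back off. */
	printf("%d\n", throttle_decision(freemem, physmem, minfree,
	    reserve, &page_load, 0, 1));
	return (0);
}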
3667 3669 void
3668 3670 arc_tempreserve_clear(uint64_t reserve)
3669 3671 {
3670 3672 atomic_add_64(&arc_tempreserve, -reserve);
3671 3673 ASSERT((int64_t)arc_tempreserve >= 0);
3672 3674 }
3673 3675
3674 3676 int
3675 3677 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3676 3678 {
3677 3679 int error;
3678 3680 uint64_t anon_size;
3679 3681
3680 3682 if (reserve > arc_c/4 && !arc_no_grow)
3681 3683 arc_c = MIN(arc_c_max, reserve * 4);
3682 3684 if (reserve > arc_c)
3683 3685 return (SET_ERROR(ENOMEM));
3684 3686
3685 3687 /*
3686 3688 * Don't count loaned bufs as in flight dirty data to prevent long
3687 3689 * network delays from blocking transactions that are ready to be
3688 3690 * assigned to a txg.
3689 3691 */
3690 3692 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3691 3693
3692 3694 /*
3693 3695 * Writes will, almost always, require additional memory allocations
3694 3696 * in order to compress/encrypt/etc the data. We therefore need to
3695 3697 * make sure that there is sufficient available memory for this.
3696 3698 */
3697 3699 error = arc_memory_throttle(reserve, txg);
3698 3700 if (error != 0)
3699 3701 return (error);
3700 3702
3701 3703 /*
3702 3704 * Throttle writes when the amount of dirty data in the cache
3703 3705 * gets too large. We try to keep the cache less than half full
3704 3706 * of dirty blocks so that our sync times don't grow too large.
3705 3707 * Note: if two requests come in concurrently, we might let them
3706 3708 * both succeed, when one of them should fail. Not a huge deal.
3707 3709 */
3708 3710
3709 3711 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3710 3712 anon_size > arc_c / 4) {
3711 3713 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3712 3714 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3713 3715 arc_tempreserve>>10,
3714 3716 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3715 3717 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3716 3718 reserve>>10, arc_c>>10);
3717 3719 return (SET_ERROR(ERESTART));
3718 3720 }
3719 3721 atomic_add_64(&arc_tempreserve, reserve);
3720 3722 return (0);
3721 3723 }
3722 3724
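The reservation check in arc_tempreserve_space() above only fails when both limits trip: the total in-flight dirty data would exceed half of arc_c, and anonymous data alone already exceeds a quarter of it. A small sketch with assumed numbers (a 1 GB arc_c and made-up byte counts, not values from this change):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t arc_c = 1ULL << 30;		/* assumed 1 GB cache target */
	uint64_t anon_size = 300ULL << 20;	/* in-flight dirty data */
	uint64_t tempreserve = 150ULL << 20;	/* already reserved */
	uint64_t reserve = 120ULL << 20;	/* this request */

	int too_dirty = (reserve + tempreserve + anon_size > arc_c / 2) &&
	    (anon_size > arc_c / 4);

	/*
	 * 120M + 150M + 300M = 570M > 512M and 300M > 256M, so this
	 * reservation would fail with ERESTART; halving anon_size would
	 * clear the second test and let it through.
	 */
	printf("throttled: %d\n", too_dirty);
	return (0);
}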
3723 3725 void
3724 3726 arc_init(void)
3725 3727 {
3726 3728 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3727 3729 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3728 3730
3729 3731 /* Convert seconds to clock ticks */
3730 3732 arc_min_prefetch_lifespan = 1 * hz;
3731 3733
3732 3734 /* Start out with 1/8 of all memory */
3733 3735 arc_c = physmem * PAGESIZE / 8;
3734 3736
3735 3737 #ifdef _KERNEL
3736 3738 /*
3737 3739 * On architectures where the physical memory can be larger
3738 3740 * than the addressable space (intel in 32-bit mode), we may
3739 3741 * need to limit the cache to 1/8 of VM size.
3740 3742 */
3741 3743 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3742 3744 #endif
3743 3745
3744 3746 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3745 3747 arc_c_min = MAX(arc_c / 4, 64<<20);
3746 3748 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3747 3749 if (arc_c * 8 >= 1<<30)
3748 3750 arc_c_max = (arc_c * 8) - (1<<30);
3749 3751 else
3750 3752 arc_c_max = arc_c_min;
3751 3753 arc_c_max = MAX(arc_c * 6, arc_c_max);
3752 3754
3753 3755 /*
3754 3756 * Allow the tunables to override our calculations if they are
3755 3757 * reasonable (i.e. over 64MB)
3756 3758 */
3757 3759 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3758 3760 arc_c_max = zfs_arc_max;
3759 3761 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3760 3762 arc_c_min = zfs_arc_min;
3761 3763
3762 3764 arc_c = arc_c_max;
3763 3765 arc_p = (arc_c >> 1);
3764 3766
3765 3767 /* limit meta-data to 1/4 of the arc capacity */
3766 3768 arc_meta_limit = arc_c_max / 4;
3767 3769
3768 3770 /* Allow the tunable to override if it is reasonable */
3769 3771 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3770 3772 arc_meta_limit = zfs_arc_meta_limit;
3771 3773
3772 3774 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3773 3775 arc_c_min = arc_meta_limit / 2;
3774 3776
3775 3777 if (zfs_arc_grow_retry > 0)
3776 3778 arc_grow_retry = zfs_arc_grow_retry;
3777 3779
3778 3780 if (zfs_arc_shrink_shift > 0)
3779 3781 arc_shrink_shift = zfs_arc_shrink_shift;
3780 3782
3781 3783 if (zfs_arc_p_min_shift > 0)
3782 3784 arc_p_min_shift = zfs_arc_p_min_shift;
3783 3785
3784 3786 /* if kmem_flags are set, let's try to use less memory */
3785 3787 if (kmem_debugging())
3786 3788 arc_c = arc_c / 2;
3787 3789 if (arc_c < arc_c_min)
3788 3790 arc_c = arc_c_min;
3789 3791
3790 3792 arc_anon = &ARC_anon;
3791 3793 arc_mru = &ARC_mru;
3792 3794 arc_mru_ghost = &ARC_mru_ghost;
3793 3795 arc_mfu = &ARC_mfu;
3794 3796 arc_mfu_ghost = &ARC_mfu_ghost;
3795 3797 arc_l2c_only = &ARC_l2c_only;
3796 3798 arc_size = 0;
3797 3799
3798 3800 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3799 3801 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3800 3802 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3801 3803 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3802 3804 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3803 3805 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3804 3806
3805 3807 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3806 3808 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3807 3809 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3808 3810 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3809 3811 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3810 3812 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3811 3813 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3812 3814 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3813 3815 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3814 3816 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3815 3817 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3816 3818 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3817 3819 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3818 3820 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3819 3821 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3820 3822 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3821 3823 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3822 3824 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3823 3825 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3824 3826 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3825 3827
3826 3828 buf_init();
3827 3829
3828 3830 arc_thread_exit = 0;
3829 3831 arc_eviction_list = NULL;
3830 3832 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3831 3833 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3832 3834
3833 3835 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3834 3836 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3835 3837
3836 3838 if (arc_ksp != NULL) {
3837 3839 arc_ksp->ks_data = &arc_stats;
3838 3840 kstat_install(arc_ksp);
3839 3841 }
3840 3842
3841 3843 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3842 3844 TS_RUN, minclsyspri);
3843 3845
3844 3846 arc_dead = FALSE;
3845 3847 arc_warm = B_FALSE;
3846 3848
3847 3849 /*
3848 3850 * Calculate maximum amount of dirty data per pool.
3849 3851 *
3850 3852 * If it has been set by /etc/system, take that.
3851 3853 * Otherwise, use a percentage of physical memory defined by
3852 3854 * zfs_dirty_data_max_percent (default 10%) with a cap at
3853 3855 * zfs_dirty_data_max_max (default 4GB).
3854 3856 */
3855 3857 if (zfs_dirty_data_max == 0) {
3856 3858 zfs_dirty_data_max = physmem * PAGESIZE *
3857 3859 zfs_dirty_data_max_percent / 100;
3858 3860 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
3859 3861 zfs_dirty_data_max_max);
3860 3862 }
3861 3863 }
3862 3864
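As a rough illustration of the sizing defaults computed in arc_init() above, here is a stand-alone sketch that replays the arithmetic for an assumed 16 GB machine with no tunables set; the machine size and the resulting figures are illustrative only.

#include <stdio.h>
#include <stdint.h>

#define MAXU(a, b)	((a) > (b) ? (a) : (b))
#define MINU(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t physmem = 16ULL << 30;			/* assumed 16 GB */

	uint64_t arc_c = physmem / 8;			/* 2 GB to start */
	uint64_t arc_c_min = MAXU(arc_c / 4, 64ULL << 20);	/* 512 MB */
	uint64_t arc_c_max = (arc_c * 8 >= 1ULL << 30) ?
	    arc_c * 8 - (1ULL << 30) : arc_c_min;	/* 15 GB */
	arc_c_max = MAXU(arc_c * 6, arc_c_max);		/* still 15 GB */

	uint64_t arc_meta_limit = arc_c_max / 4;	/* ~3.75 GB */
	if (arc_c_min < arc_meta_limit / 2)
		arc_c_min = arc_meta_limit / 2;		/* bumped to ~1.9 GB */

	uint64_t dirty_max = MINU(physmem * 10 / 100, 4ULL << 30); /* 1.6 GB */

	printf("c_min=%llu c_max=%llu meta=%llu dirty=%llu\n",
	    (unsigned long long)arc_c_min, (unsigned long long)arc_c_max,
	    (unsigned long long)arc_meta_limit, (unsigned long long)dirty_max);
	return (0);
}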
3863 3865 void
3864 3866 arc_fini(void)
3865 3867 {
3866 3868 mutex_enter(&arc_reclaim_thr_lock);
3867 3869 arc_thread_exit = 1;
3868 3870 while (arc_thread_exit != 0)
3869 3871 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3870 3872 mutex_exit(&arc_reclaim_thr_lock);
3871 3873
3872 3874 arc_flush(NULL);
3873 3875
3874 3876 arc_dead = TRUE;
3875 3877
3876 3878 if (arc_ksp != NULL) {
3877 3879 kstat_delete(arc_ksp);
3878 3880 arc_ksp = NULL;
3879 3881 }
3880 3882
3881 3883 mutex_destroy(&arc_eviction_mtx);
3882 3884 mutex_destroy(&arc_reclaim_thr_lock);
3883 3885 cv_destroy(&arc_reclaim_thr_cv);
3884 3886
3885 3887 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3886 3888 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3887 3889 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3888 3890 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3889 3891 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3890 3892 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3891 3893 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3892 3894 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3893 3895
3894 3896 mutex_destroy(&arc_anon->arcs_mtx);
3895 3897 mutex_destroy(&arc_mru->arcs_mtx);
3896 3898 mutex_destroy(&arc_mru_ghost->arcs_mtx);
3897 3899 mutex_destroy(&arc_mfu->arcs_mtx);
3898 3900 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3899 3901 mutex_destroy(&arc_l2c_only->arcs_mtx);
3900 3902
3901 3903 buf_fini();
3902 3904
3903 3905 ASSERT(arc_loaned_bytes == 0);
3904 3906 }
3905 3907
3906 3908 /*
3907 3909 * Level 2 ARC
3908 3910 *
3909 3911 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3910 3912 * It uses dedicated storage devices to hold cached data, which are populated
3911 3913 * using large infrequent writes. The main role of this cache is to boost
3912 3914 * the performance of random read workloads. The intended L2ARC devices
3913 3915 * include short-stroked disks, solid state disks, and other media with
3914 3916 * substantially faster read latency than disk.
3915 3917 *
3916 3918 * +-----------------------+
3917 3919 * | ARC |
3918 3920 * +-----------------------+
3919 3921 * | ^ ^
3920 3922 * | | |
3921 3923 * l2arc_feed_thread() arc_read()
3922 3924 * | | |
3923 3925 * | l2arc read |
3924 3926 * V | |
3925 3927 * +---------------+ |
3926 3928 * | L2ARC | |
3927 3929 * +---------------+ |
3928 3930 * | ^ |
3929 3931 * l2arc_write() | |
3930 3932 * | | |
3931 3933 * V | |
3932 3934 * +-------+ +-------+
3933 3935 * | vdev | | vdev |
3934 3936 * | cache | | cache |
3935 3937 * +-------+ +-------+
3936 3938 * +=========+ .-----.
3937 3939 * : L2ARC : |-_____-|
3938 3940 * : devices : | Disks |
3939 3941 * +=========+ `-_____-'
3940 3942 *
3941 3943 * Read requests are satisfied from the following sources, in order:
3942 3944 *
3943 3945 * 1) ARC
3944 3946 * 2) vdev cache of L2ARC devices
3945 3947 * 3) L2ARC devices
3946 3948 * 4) vdev cache of disks
3947 3949 * 5) disks
3948 3950 *
3949 3951 * Some L2ARC device types exhibit extremely slow write performance.
3950 3952 * To accommodate this, there are some significant differences between
3951 3953 * the L2ARC and traditional cache design:
3952 3954 *
3953 3955 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3954 3956 * the ARC behave as usual, freeing buffers and placing headers on ghost
3955 3957 * lists. The ARC does not send buffers to the L2ARC during eviction as
3956 3958 * this would add inflated write latencies for all ARC memory pressure.
3957 3959 *
3958 3960 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3959 3961 * It does this by periodically scanning buffers from the eviction-end of
3960 3962 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3961 3963 * not already there. It scans until a headroom of buffers is satisfied,
3962 3964 * which itself is a buffer for ARC eviction. If a compressible buffer is
3963 3965 * found during scanning and selected for writing to an L2ARC device, we
3964 3966 * temporarily boost scanning headroom during the next scan cycle to make
3965 3967 * sure we adapt to compression effects (which might significantly reduce
3966 3968 * the data volume we write to L2ARC). The thread that does this is
3967 3969 * l2arc_feed_thread(), illustrated below; example sizes are included to
3968 3970 * provide a better sense of ratio than this diagram:
3969 3971 *
3970 3972 * head --> tail
3971 3973 * +---------------------+----------+
3972 3974 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3973 3975 * +---------------------+----------+ | o L2ARC eligible
3974 3976 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3975 3977 * +---------------------+----------+ |
3976 3978 * 15.9 Gbytes ^ 32 Mbytes |
3977 3979 * headroom |
3978 3980 * l2arc_feed_thread()
3979 3981 * |
3980 3982 * l2arc write hand <--[oooo]--'
3981 3983 * | 8 Mbyte
3982 3984 * | write max
3983 3985 * V
3984 3986 * +==============================+
3985 3987 * L2ARC dev |####|#|###|###| |####| ... |
3986 3988 * +==============================+
3987 3989 * 32 Gbytes
3988 3990 *
3989 3991 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3990 3992 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3991 3993 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3992 3994 * safe to say that this is an uncommon case, since buffers at the end of
3993 3995 * the ARC lists have moved there due to inactivity.
3994 3996 *
3995 3997 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3996 3998 * then the L2ARC simply misses copying some buffers. This serves as a
3997 3999 * pressure valve to prevent heavy read workloads from both stalling the ARC
3998 4000 * with waits and clogging the L2ARC with writes. This also helps prevent
3999 4001 * the potential for the L2ARC to churn if it attempts to cache content too
4000 4002 * quickly, such as during backups of the entire pool.
4001 4003 *
4002 4004 * 5. After system boot and before the ARC has filled main memory, there are
4003 4005 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4004 4006 * lists can remain mostly static. Instead of searching from the tail of these
4005 4007 * lists as pictured, the l2arc_feed_thread() will search from the list heads
4006 4008 * for eligible buffers, greatly increasing its chance of finding them.
4007 4009 *
4008 4010 * The L2ARC device write speed is also boosted during this time so that
4009 4011 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
4010 4012 * there are no L2ARC reads, and no fear of degrading read performance
4011 4013 * through increased writes.
4012 4014 *
4013 4015 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4014 4016 * the vdev queue can aggregate them into larger and fewer writes. Each
4015 4017 * device is written to in a rotor fashion, sweeping writes through
4016 4018 * available space then repeating.
4017 4019 *
4018 4020 * 7. The L2ARC does not store dirty content. It never needs to flush
4019 4021 * write buffers back to disk based storage.
4020 4022 *
4021 4023 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4022 4024 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4023 4025 *
4024 4026 * The performance of the L2ARC can be tweaked by a number of tunables, which
4025 4027 * may be necessary for different workloads:
4026 4028 *
4027 4029 * l2arc_write_max max write bytes per interval
4028 4030 * l2arc_write_boost extra write bytes during device warmup
4029 4031 * l2arc_noprefetch skip caching prefetched buffers
4030 4032 * l2arc_headroom number of max device writes to precache
4031 4033 * l2arc_headroom_boost when we find compressed buffers during ARC
4032 4034 * scanning, we multiply headroom by this
4033 4035 * percentage factor for the next scan cycle,
4034 4036 * since more compressed buffers are likely to
4035 4037 * be present
4036 4038 * l2arc_feed_secs seconds between L2ARC writing
4037 4039 *
4038 4040 * Tunables may be removed or added as future performance improvements are
4039 4041 * integrated, and also may become zpool properties.
4040 4042 *
4041 4043 * There are three key functions that control how the L2ARC warms up:
4042 4044 *
4043 4045 * l2arc_write_eligible() check if a buffer is eligible to cache
4044 4046 * l2arc_write_size() calculate how much to write
4045 4047 * l2arc_write_interval() calculate sleep delay between writes
4046 4048 *
4047 4049 * These three functions determine what to write, how much, and how quickly
4048 4050 * to send writes.
4049 4051 */
4050 4052
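The scan headroom described in the comment above is derived from the per-interval write budget. A minimal sketch of that relationship follows, assuming default-looking values for the tunables; the 8 MB write max, headroom factor of 2, and 200% boost are assumptions for illustration, not part of this change.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Assumed tunable values for illustration only. */
	uint64_t l2arc_write_max = 8ULL << 20;		/* bytes per interval */
	uint64_t l2arc_write_boost = 8ULL << 20;	/* extra while warming */
	uint64_t l2arc_headroom = 2;
	uint64_t l2arc_headroom_boost = 200;		/* percent */
	int arc_warm = 0;			/* still warming up */
	int compressed_last_pass = 1;		/* compressible data was seen */

	uint64_t target_sz = l2arc_write_max +
	    (arc_warm ? 0 : l2arc_write_boost);		/* 16 MB while cold */
	uint64_t headroom = target_sz * l2arc_headroom;	/* scan 32 MB deep */
	if (compressed_last_pass)
		headroom = headroom * l2arc_headroom_boost / 100; /* 64 MB */

	printf("write %llu MB, scan %llu MB of list tails\n",
	    (unsigned long long)(target_sz >> 20),
	    (unsigned long long)(headroom >> 20));
	return (0);
}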
4051 4053 static boolean_t
4052 4054 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4053 4055 {
4054 4056 /*
4055 4057 * A buffer is *not* eligible for the L2ARC if it:
4056 4058 * 1. belongs to a different spa.
4057 4059 * 2. is already cached on the L2ARC.
4058 4060 * 3. has an I/O in progress (it may be an incomplete read).
4059 4061 * 4. is flagged not eligible (zfs property).
4060 4062 */
4061 4063 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
4062 4064 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
4063 4065 return (B_FALSE);
4064 4066
4065 4067 return (B_TRUE);
4066 4068 }
4067 4069
4068 4070 static uint64_t
4069 4071 l2arc_write_size(void)
4070 4072 {
4071 4073 uint64_t size;
4072 4074
4073 4075 /*
4074 4076 * Make sure our globals have meaningful values in case the user
4075 4077 * altered them.
4076 4078 */
4077 4079 size = l2arc_write_max;
4078 4080 if (size == 0) {
4079 4081 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4080 4082 "be greater than zero, resetting it to the default (%d)",
4081 4083 L2ARC_WRITE_SIZE);
4082 4084 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4083 4085 }
4084 4086
4085 4087 if (arc_warm == B_FALSE)
4086 4088 size += l2arc_write_boost;
4087 4089
4088 4090 return (size);
4089 4091
4090 4092 }
4091 4093
4092 4094 static clock_t
4093 4095 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4094 4096 {
4095 4097 clock_t interval, next, now;
4096 4098
4097 4099 /*
4098 4100 * If the ARC lists are busy, increase our write rate; if the
4099 4101 * lists are stale, idle back. This is achieved by checking
4100 4102 * how much we previously wrote - if it was more than half of
4101 4103 * what we wanted, schedule the next write much sooner.
4102 4104 */
4103 4105 if (l2arc_feed_again && wrote > (wanted / 2))
4104 4106 interval = (hz * l2arc_feed_min_ms) / 1000;
4105 4107 else
4106 4108 interval = hz * l2arc_feed_secs;
4107 4109
4108 4110 now = ddi_get_lbolt();
4109 4111 next = MAX(now, MIN(now + interval, began + interval));
4110 4112
4111 4113 return (next);
4112 4114 }
4113 4115
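A small worked example of the interval calculation above, assuming hz of 100 and stock-looking feed tunables (both assumptions): writing more than half of what was wanted takes the fast 200 ms path, writing less falls back to the one-second cadence.

#include <stdio.h>

int
main(void)
{
	/* Assumed values: 100 ticks per second and illustrative tunables. */
	long hz = 100;
	long l2arc_feed_secs = 1;
	long l2arc_feed_min_ms = 200;
	int l2arc_feed_again = 1;

	unsigned long long wanted = 16ULL << 20;
	unsigned long long wrote = 10ULL << 20;	/* more than half of wanted */

	long interval;
	if (l2arc_feed_again && wrote > wanted / 2)
		interval = (hz * l2arc_feed_min_ms) / 1000;	/* 20 ticks */
	else
		interval = hz * l2arc_feed_secs;		/* 100 ticks */

	/*
	 * The busy case wakes the feed thread again in ~200 ms instead of
	 * a full second; writing only 6 MB of the 16 MB wanted would take
	 * the slow path instead.
	 */
	printf("sleep %ld ticks\n", interval);
	return (0);
}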
4114 4116 static void
4115 4117 l2arc_hdr_stat_add(void)
4116 4118 {
4117 4119 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4118 4120 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4119 4121 }
4120 4122
4121 4123 static void
4122 4124 l2arc_hdr_stat_remove(void)
4123 4125 {
4124 4126 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4125 4127 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4126 4128 }
4127 4129
4128 4130 /*
4129 4131 * Cycle through L2ARC devices. This is how L2ARC load balances.
4130 4132 * If a device is returned, this also returns holding the spa config lock.
4131 4133 */
4132 4134 static l2arc_dev_t *
4133 4135 l2arc_dev_get_next(void)
4134 4136 {
4135 4137 l2arc_dev_t *first, *next = NULL;
4136 4138
4137 4139 /*
4138 4140 * Lock out the removal of spas (spa_namespace_lock), then removal
4139 4141 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
4140 4142 * both locks will be dropped and a spa config lock held instead.
4141 4143 */
4142 4144 mutex_enter(&spa_namespace_lock);
4143 4145 mutex_enter(&l2arc_dev_mtx);
4144 4146
4145 4147 /* if there are no vdevs, there is nothing to do */
4146 4148 if (l2arc_ndev == 0)
4147 4149 goto out;
4148 4150
4149 4151 first = NULL;
4150 4152 next = l2arc_dev_last;
4151 4153 do {
4152 4154 /* loop around the list looking for a non-faulted vdev */
4153 4155 if (next == NULL) {
4154 4156 next = list_head(l2arc_dev_list);
4155 4157 } else {
4156 4158 next = list_next(l2arc_dev_list, next);
4157 4159 if (next == NULL)
4158 4160 next = list_head(l2arc_dev_list);
4159 4161 }
4160 4162
4161 4163 /* if we have come back to the start, bail out */
4162 4164 if (first == NULL)
4163 4165 first = next;
4164 4166 else if (next == first)
4165 4167 break;
4166 4168
4167 4169 } while (vdev_is_dead(next->l2ad_vdev));
4168 4170
4169 4171 /* if we were unable to find any usable vdevs, return NULL */
4170 4172 if (vdev_is_dead(next->l2ad_vdev))
4171 4173 next = NULL;
4172 4174
4173 4175 l2arc_dev_last = next;
4174 4176
4175 4177 out:
4176 4178 mutex_exit(&l2arc_dev_mtx);
4177 4179
4178 4180 /*
4179 4181 * Grab the config lock to prevent the 'next' device from being
4180 4182 * removed while we are writing to it.
4181 4183 */
4182 4184 if (next != NULL)
4183 4185 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4184 4186 mutex_exit(&spa_namespace_lock);
4185 4187
4186 4188 return (next);
4187 4189 }
4188 4190
4189 4191 /*
4190 4192 * Free buffers that were tagged for destruction.
4191 4193 */
4192 4194 static void
4193 4195 l2arc_do_free_on_write()
4194 4196 {
4195 4197 list_t *buflist;
4196 4198 l2arc_data_free_t *df, *df_prev;
4197 4199
4198 4200 mutex_enter(&l2arc_free_on_write_mtx);
4199 4201 buflist = l2arc_free_on_write;
4200 4202
4201 4203 for (df = list_tail(buflist); df; df = df_prev) {
4202 4204 df_prev = list_prev(buflist, df);
4203 4205 ASSERT(df->l2df_data != NULL);
4204 4206 ASSERT(df->l2df_func != NULL);
4205 4207 df->l2df_func(df->l2df_data, df->l2df_size);
4206 4208 list_remove(buflist, df);
4207 4209 kmem_free(df, sizeof (l2arc_data_free_t));
4208 4210 }
4209 4211
4210 4212 mutex_exit(&l2arc_free_on_write_mtx);
4211 4213 }
4212 4214
4213 4215 /*
4214 4216 * A write to a cache device has completed. Update all headers to allow
4215 4217 * reads from these buffers to begin.
4216 4218 */
4217 4219 static void
4218 4220 l2arc_write_done(zio_t *zio)
4219 4221 {
4220 4222 l2arc_write_callback_t *cb;
4221 4223 l2arc_dev_t *dev;
4222 4224 list_t *buflist;
4223 4225 arc_buf_hdr_t *head, *ab, *ab_prev;
4224 4226 l2arc_buf_hdr_t *abl2;
4225 4227 kmutex_t *hash_lock;
4226 4228 int64_t bytes_dropped = 0;
4227 4229
4228 4230 cb = zio->io_private;
4229 4231 ASSERT(cb != NULL);
4230 4232 dev = cb->l2wcb_dev;
4231 4233 ASSERT(dev != NULL);
4232 4234 head = cb->l2wcb_head;
4233 4235 ASSERT(head != NULL);
4234 4236 buflist = dev->l2ad_buflist;
4235 4237 ASSERT(buflist != NULL);
4236 4238 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4237 4239 l2arc_write_callback_t *, cb);
4238 4240
4239 4241 if (zio->io_error != 0)
4240 4242 ARCSTAT_BUMP(arcstat_l2_writes_error);
4241 4243
4242 4244 mutex_enter(&l2arc_buflist_mtx);
4243 4245
4244 4246 /*
4245 4247 * All writes completed, or an error was hit.
4246 4248 */
4247 4249 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4248 4250 ab_prev = list_prev(buflist, ab);
4249 4251 abl2 = ab->b_l2hdr;
4250 4252
4251 4253 /*
4252 4254 * Release the temporary compressed buffer as soon as possible.
4253 4255 */
4254 4256 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4255 4257 l2arc_release_cdata_buf(ab);
4256 4258
4257 4259 hash_lock = HDR_LOCK(ab);
4258 4260 if (!mutex_tryenter(hash_lock)) {
4259 4261 /*
4260 4262 * This buffer misses out. It may be in a stage
4261 4263 * of eviction. Its ARC_L2_WRITING flag will be
4262 4264 * left set, denying reads to this buffer.
4263 4265 */
4264 4266 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4265 4267 continue;
4266 4268 }
4267 4269
4268 4270 if (zio->io_error != 0) {
4269 4271 /*
4270 4272 * Error - drop L2ARC entry.
4271 4273 */
4272 4274 list_remove(buflist, ab);
4273 4275 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4274 4276 bytes_dropped += abl2->b_asize;
4275 4277 ab->b_l2hdr = NULL;
4276 4278 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4277 4279 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4278 4280 }
4279 4281
4280 4282 /*
4281 4283 * Allow ARC to begin reads to this L2ARC entry.
4282 4284 */
4283 4285 ab->b_flags &= ~ARC_L2_WRITING;
4284 4286
4285 4287 mutex_exit(hash_lock);
4286 4288 }
4287 4289
4288 4290 atomic_inc_64(&l2arc_writes_done);
4289 4291 list_remove(buflist, head);
4290 4292 kmem_cache_free(hdr_cache, head);
4291 4293 mutex_exit(&l2arc_buflist_mtx);
4292 4294
4293 4295 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
4294 4296
4295 4297 l2arc_do_free_on_write();
4296 4298
4297 4299 kmem_free(cb, sizeof (l2arc_write_callback_t));
4298 4300 }
4299 4301
4300 4302 /*
4301 4303 * A read to a cache device completed. Validate buffer contents before
4302 4304 * handing over to the regular ARC routines.
4303 4305 */
4304 4306 static void
4305 4307 l2arc_read_done(zio_t *zio)
4306 4308 {
4307 4309 l2arc_read_callback_t *cb;
4308 4310 arc_buf_hdr_t *hdr;
4309 4311 arc_buf_t *buf;
4310 4312 kmutex_t *hash_lock;
4311 4313 int equal;
4312 4314
4313 4315 ASSERT(zio->io_vd != NULL);
4314 4316 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4315 4317
4316 4318 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4317 4319
4318 4320 cb = zio->io_private;
4319 4321 ASSERT(cb != NULL);
4320 4322 buf = cb->l2rcb_buf;
4321 4323 ASSERT(buf != NULL);
4322 4324
4323 4325 hash_lock = HDR_LOCK(buf->b_hdr);
4324 4326 mutex_enter(hash_lock);
4325 4327 hdr = buf->b_hdr;
4326 4328 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4327 4329
4328 4330 /*
4329 4331 * If the buffer was compressed, decompress it first.
4330 4332 */
4331 4333 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4332 4334 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4333 4335 ASSERT(zio->io_data != NULL);
4334 4336
4335 4337 /*
4336 4338 * Check this survived the L2ARC journey.
4337 4339 */
4338 4340 equal = arc_cksum_equal(buf);
4339 4341 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4340 4342 mutex_exit(hash_lock);
4341 4343 zio->io_private = buf;
4342 4344 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4343 4345 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4344 4346 arc_read_done(zio);
4345 4347 } else {
4346 4348 mutex_exit(hash_lock);
4347 4349 /*
4348 4350 * Buffer didn't survive caching. Increment stats and
4349 4351 * reissue to the original storage device.
4350 4352 */
4351 4353 if (zio->io_error != 0) {
4352 4354 ARCSTAT_BUMP(arcstat_l2_io_error);
4353 4355 } else {
4354 4356 zio->io_error = SET_ERROR(EIO);
4355 4357 }
4356 4358 if (!equal)
4357 4359 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4358 4360
4359 4361 /*
4360 4362 * If there's no waiter, issue an async i/o to the primary
4361 4363 * storage now. If there *is* a waiter, the caller must
4362 4364 * issue the i/o in a context where it's OK to block.
4363 4365 */
4364 4366 if (zio->io_waiter == NULL) {
4365 4367 zio_t *pio = zio_unique_parent(zio);
4366 4368
4367 4369 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4368 4370
4369 4371 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4370 4372 buf->b_data, zio->io_size, arc_read_done, buf,
4371 4373 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4372 4374 }
4373 4375 }
4374 4376
4375 4377 kmem_free(cb, sizeof (l2arc_read_callback_t));
4376 4378 }
4377 4379
4378 4380 /*
4379 4381 * This is the list priority from which the L2ARC will search for pages to
4380 4382 * cache. This is used within loops (0..3) to cycle through lists in the
4381 4383 * desired order. This order can have a significant effect on cache
4382 4384 * performance.
4383 4385 *
4384 4386 * Currently the metadata lists are hit first, MFU then MRU, followed by
4385 4387 * the data lists. This function returns a locked list, and also returns
4386 4388 * the lock pointer.
4387 4389 */
4388 4390 static list_t *
4389 4391 l2arc_list_locked(int list_num, kmutex_t **lock)
4390 4392 {
4391 4393 list_t *list = NULL;
4392 4394
4393 4395 ASSERT(list_num >= 0 && list_num <= 3);
4394 4396
4395 4397 switch (list_num) {
4396 4398 case 0:
4397 4399 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4398 4400 *lock = &arc_mfu->arcs_mtx;
4399 4401 break;
4400 4402 case 1:
4401 4403 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4402 4404 *lock = &arc_mru->arcs_mtx;
4403 4405 break;
4404 4406 case 2:
4405 4407 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4406 4408 *lock = &arc_mfu->arcs_mtx;
4407 4409 break;
4408 4410 case 3:
4409 4411 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4410 4412 *lock = &arc_mru->arcs_mtx;
4411 4413 break;
4412 4414 }
4413 4415
4414 4416 ASSERT(!(MUTEX_HELD(*lock)));
4415 4417 mutex_enter(*lock);
4416 4418 return (list);
4417 4419 }
4418 4420
4419 4421 /*
4420 4422 * Evict buffers from the device write hand to the distance specified in
4421 4423 * bytes. This distance may span populated buffers, or it may span nothing.
4422 4424 * This is clearing a region on the L2ARC device ready for writing.
4423 4425 * If the 'all' boolean is set, every buffer is evicted.
4424 4426 */
4425 4427 static void
4426 4428 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4427 4429 {
4428 4430 list_t *buflist;
4429 4431 l2arc_buf_hdr_t *abl2;
4430 4432 arc_buf_hdr_t *ab, *ab_prev;
4431 4433 kmutex_t *hash_lock;
4432 4434 uint64_t taddr;
4433 4435 int64_t bytes_evicted = 0;
4434 4436
4435 4437 buflist = dev->l2ad_buflist;
4436 4438
4437 4439 if (buflist == NULL)
4438 4440 return;
4439 4441
4440 4442 if (!all && dev->l2ad_first) {
4441 4443 /*
4442 4444 * This is the first sweep through the device. There is
4443 4445 * nothing to evict.
4444 4446 */
4445 4447 return;
4446 4448 }
4447 4449
4448 4450 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4449 4451 /*
4450 4452 * When nearing the end of the device, evict to the end
4451 4453 * before the device write hand jumps to the start.
4452 4454 */
4453 4455 taddr = dev->l2ad_end;
4454 4456 } else {
4455 4457 taddr = dev->l2ad_hand + distance;
4456 4458 }
4457 4459 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4458 4460 uint64_t, taddr, boolean_t, all);
4459 4461
4460 4462 top:
4461 4463 mutex_enter(&l2arc_buflist_mtx);
4462 4464 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4463 4465 ab_prev = list_prev(buflist, ab);
4464 4466
4465 4467 hash_lock = HDR_LOCK(ab);
4466 4468 if (!mutex_tryenter(hash_lock)) {
4467 4469 /*
4468 4470 * Missed the hash lock. Retry.
4469 4471 */
4470 4472 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4471 4473 mutex_exit(&l2arc_buflist_mtx);
4472 4474 mutex_enter(hash_lock);
4473 4475 mutex_exit(hash_lock);
4474 4476 goto top;
4475 4477 }
4476 4478
4477 4479 if (HDR_L2_WRITE_HEAD(ab)) {
4478 4480 /*
4479 4481 * We hit a write head node. Leave it for
4480 4482 * l2arc_write_done().
4481 4483 */
4482 4484 list_remove(buflist, ab);
4483 4485 mutex_exit(hash_lock);
4484 4486 continue;
4485 4487 }
4486 4488
4487 4489 if (!all && ab->b_l2hdr != NULL &&
4488 4490 (ab->b_l2hdr->b_daddr > taddr ||
4489 4491 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4490 4492 /*
4491 4493 * We've evicted to the target address,
4492 4494 * or the end of the device.
4493 4495 */
4494 4496 mutex_exit(hash_lock);
4495 4497 break;
4496 4498 }
4497 4499
4498 4500 if (HDR_FREE_IN_PROGRESS(ab)) {
4499 4501 /*
4500 4502 * Already on the path to destruction.
4501 4503 */
4502 4504 mutex_exit(hash_lock);
4503 4505 continue;
4504 4506 }
4505 4507
4506 4508 if (ab->b_state == arc_l2c_only) {
4507 4509 ASSERT(!HDR_L2_READING(ab));
4508 4510 /*
4509 4511 * This doesn't exist in the ARC. Destroy.
4510 4512 * arc_hdr_destroy() will call list_remove()
4511 4513 * and decrement arcstat_l2_size.
4512 4514 */
4513 4515 arc_change_state(arc_anon, ab, hash_lock);
4514 4516 arc_hdr_destroy(ab);
4515 4517 } else {
4516 4518 /*
4517 4519 * Invalidate issued or about to be issued
4518 4520 * reads, since we may be about to write
4519 4521 * over this location.
4520 4522 */
4521 4523 if (HDR_L2_READING(ab)) {
4522 4524 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4523 4525 ab->b_flags |= ARC_L2_EVICTED;
4524 4526 }
4525 4527
4526 4528 /*
4527 4529 * Tell ARC this no longer exists in L2ARC.
4528 4530 */
4529 4531 if (ab->b_l2hdr != NULL) {
4530 4532 abl2 = ab->b_l2hdr;
4531 4533 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4532 4534 bytes_evicted += abl2->b_asize;
4533 4535 ab->b_l2hdr = NULL;
4534 4536 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4535 4537 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4536 4538 }
4537 4539 list_remove(buflist, ab);
4538 4540
4539 4541 /*
4540 4542 * This may have been leftover after a
4541 4543 * failed write.
4542 4544 */
4543 4545 ab->b_flags &= ~ARC_L2_WRITING;
4544 4546 }
4545 4547 mutex_exit(hash_lock);
4546 4548 }
4547 4549 mutex_exit(&l2arc_buflist_mtx);
4548 4550
4549 4551 vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
4550 4552 dev->l2ad_evict = taddr;
4551 4553 }
4552 4554
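The eviction target chosen in l2arc_evict() above simply clears a window ahead of the write hand, jumping to the device end once the hand is within two passes of it. A minimal sketch with made-up device geometry (the sizes are assumptions, not values from this change):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Made-up device geometry, in bytes. */
	uint64_t l2ad_end = 32ULL << 30;
	uint64_t l2ad_hand = l2ad_end - (20ULL << 20);	/* 20 MB from the end */
	uint64_t distance = 16ULL << 20;		/* this pass wants 16 MB */
	uint64_t taddr;

	if (l2ad_hand >= l2ad_end - 2 * distance) {
		/* Close to the end: evict to the end, then the hand wraps. */
		taddr = l2ad_end;
	} else {
		taddr = l2ad_hand + distance;
	}

	printf("evict [%llu, %llu)\n",
	    (unsigned long long)l2ad_hand, (unsigned long long)taddr);
	return (0);
}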
4553 4555 /*
4554 4556 * Find and write ARC buffers to the L2ARC device.
4555 4557 *
4556 4558 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4557 4559 * for reading until they have completed writing.
4558 4560 * The headroom_boost is an in-out parameter used to maintain headroom boost
4559 4561 * state between calls to this function.
4560 4562 *
4561 4563 * Returns the number of bytes actually written (which may be smaller than
4562 4564 * the delta by which the device hand has changed due to alignment).
4563 4565 */
4564 4566 static uint64_t
4565 4567 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4566 4568 boolean_t *headroom_boost)
4567 4569 {
4568 4570 arc_buf_hdr_t *ab, *ab_prev, *head;
4569 4571 list_t *list;
4570 4572 uint64_t write_asize, write_psize, write_sz, headroom,
4571 4573 buf_compress_minsz;
4572 4574 void *buf_data;
4573 4575 kmutex_t *list_lock;
4574 4576 boolean_t full;
4575 4577 l2arc_write_callback_t *cb;
4576 4578 zio_t *pio, *wzio;
4577 4579 uint64_t guid = spa_load_guid(spa);
4578 4580 const boolean_t do_headroom_boost = *headroom_boost;
4579 4581
4580 4582 ASSERT(dev->l2ad_vdev != NULL);
4581 4583
4582 4584 /* Lower the flag now, we might want to raise it again later. */
4583 4585 *headroom_boost = B_FALSE;
4584 4586
4585 4587 pio = NULL;
4586 4588 write_sz = write_asize = write_psize = 0;
4587 4589 full = B_FALSE;
4588 4590 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4589 4591 head->b_flags |= ARC_L2_WRITE_HEAD;
4590 4592
4591 4593 /*
4592 4594 * We will want to try to compress buffers that are at least 2x the
4593 4595 * device sector size.
4594 4596 */
4595 4597 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4596 4598
4597 4599 /*
4598 4600 * Copy buffers for L2ARC writing.
4599 4601 */
4600 4602 mutex_enter(&l2arc_buflist_mtx);
4601 4603 for (int try = 0; try <= 3; try++) {
4602 4604 uint64_t passed_sz = 0;
4603 4605
4604 4606 list = l2arc_list_locked(try, &list_lock);
4605 4607
4606 4608 /*
4607 4609 * L2ARC fast warmup.
4608 4610 *
4609 4611 * Until the ARC is warm and starts to evict, read from the
4610 4612 * head of the ARC lists rather than the tail.
4611 4613 */
4612 4614 if (arc_warm == B_FALSE)
4613 4615 ab = list_head(list);
4614 4616 else
4615 4617 ab = list_tail(list);
4616 4618
4617 4619 headroom = target_sz * l2arc_headroom;
4618 4620 if (do_headroom_boost)
4619 4621 headroom = (headroom * l2arc_headroom_boost) / 100;
4620 4622
4621 4623 for (; ab; ab = ab_prev) {
4622 4624 l2arc_buf_hdr_t *l2hdr;
4623 4625 kmutex_t *hash_lock;
4624 4626 uint64_t buf_sz;
4625 4627
4626 4628 if (arc_warm == B_FALSE)
4627 4629 ab_prev = list_next(list, ab);
4628 4630 else
4629 4631 ab_prev = list_prev(list, ab);
4630 4632
4631 4633 hash_lock = HDR_LOCK(ab);
4632 4634 if (!mutex_tryenter(hash_lock)) {
4633 4635 /*
4634 4636 * Skip this buffer rather than waiting.
4635 4637 */
4636 4638 continue;
4637 4639 }
4638 4640
4639 4641 passed_sz += ab->b_size;
4640 4642 if (passed_sz > headroom) {
4641 4643 /*
4642 4644 * Searched too far.
4643 4645 */
4644 4646 mutex_exit(hash_lock);
4645 4647 break;
4646 4648 }
4647 4649
4648 4650 if (!l2arc_write_eligible(guid, ab)) {
4649 4651 mutex_exit(hash_lock);
4650 4652 continue;
4651 4653 }
4652 4654
4653 4655 if ((write_sz + ab->b_size) > target_sz) {
4654 4656 full = B_TRUE;
4655 4657 mutex_exit(hash_lock);
4656 4658 break;
4657 4659 }
4658 4660
4659 4661 if (pio == NULL) {
4660 4662 /*
4661 4663 * Insert a dummy header on the buflist so
4662 4664 * l2arc_write_done() can find where the
4663 4665 * write buffers begin without searching.
4664 4666 */
4665 4667 list_insert_head(dev->l2ad_buflist, head);
4666 4668
4667 4669 cb = kmem_alloc(
4668 4670 sizeof (l2arc_write_callback_t), KM_SLEEP);
4669 4671 cb->l2wcb_dev = dev;
4670 4672 cb->l2wcb_head = head;
4671 4673 pio = zio_root(spa, l2arc_write_done, cb,
4672 4674 ZIO_FLAG_CANFAIL);
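				/*
				 * pio becomes the root of a zio tree: each
				 * buffer write below is issued as a child
				 * via zio_nowait(), and the zio_wait(pio)
				 * at the end of this function waits for the
				 * whole tree, with l2arc_write_done() as
				 * its completion callback.
				 */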
4673 4675 }
4674 4676
4675 4677 /*
4676 4678 * Create and add a new L2ARC header.
4677 4679 */
4678 4680 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4679 4681 l2hdr->b_dev = dev;
4680 4682 ab->b_flags |= ARC_L2_WRITING;
4681 4683
4682 4684 /*
4683 4685 * Temporarily stash the data buffer in b_tmp_cdata.
4684 4686 * The subsequent write step will pick it up from
4685 4687 			 * there. This is because we can't access ab->b_buf
4686 4688 * without holding the hash_lock, which we in turn
4687 4689 * can't access without holding the ARC list locks
4688 4690 * (which we want to avoid during compression/writing).
4689 4691 */
4690 4692 l2hdr->b_compress = ZIO_COMPRESS_OFF;
4691 4693 l2hdr->b_asize = ab->b_size;
4692 4694 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
4693 4695
4694 4696 buf_sz = ab->b_size;
4695 4697 ab->b_l2hdr = l2hdr;
4696 4698
4697 4699 list_insert_head(dev->l2ad_buflist, ab);
4698 4700
4699 4701 /*
4700 4702 * Compute and store the buffer cksum before
4701 4703 			 * writing. In debug builds the cksum is verified first.
4702 4704 */
4703 4705 arc_cksum_verify(ab->b_buf);
4704 4706 arc_cksum_compute(ab->b_buf, B_TRUE);
4705 4707
4706 4708 mutex_exit(hash_lock);
4707 4709
4708 4710 write_sz += buf_sz;
4709 4711 }
4710 4712
4711 4713 mutex_exit(list_lock);
4712 4714
4713 4715 if (full == B_TRUE)
4714 4716 break;
4715 4717 }
4716 4718
4717 4719 /* No buffers selected for writing? */
4718 4720 if (pio == NULL) {
4719 4721 ASSERT0(write_sz);
4720 4722 mutex_exit(&l2arc_buflist_mtx);
4721 4723 kmem_cache_free(hdr_cache, head);
4722 4724 return (0);
4723 4725 }
4724 4726
4725 4727 /*
4726 4728 	 * Now start writing the buffers. We start at the write head
4727 4729 * and work backwards, retracing the course of the buffer selector
4728 4730 * loop above.
4729 4731 */
4730 4732 for (ab = list_prev(dev->l2ad_buflist, head); ab;
4731 4733 ab = list_prev(dev->l2ad_buflist, ab)) {
4732 4734 l2arc_buf_hdr_t *l2hdr;
4733 4735 uint64_t buf_sz;
4734 4736
4735 4737 /*
4736 4738 * We shouldn't need to lock the buffer here, since we flagged
4737 4739 * it as ARC_L2_WRITING in the previous step, but we must take
4738 4740 * care to only access its L2 cache parameters. In particular,
4739 4741 * ab->b_buf may be invalid by now due to ARC eviction.
4740 4742 */
4741 4743 l2hdr = ab->b_l2hdr;
4742 4744 l2hdr->b_daddr = dev->l2ad_hand;
4743 4745
4744 4746 if ((ab->b_flags & ARC_L2COMPRESS) &&
4745 4747 l2hdr->b_asize >= buf_compress_minsz) {
4746 4748 if (l2arc_compress_buf(l2hdr)) {
4747 4749 /*
4748 4750 * If compression succeeded, enable headroom
4749 4751 * boost on the next scan cycle.
4750 4752 */
4751 4753 *headroom_boost = B_TRUE;
4752 4754 }
4753 4755 }
4754 4756
4755 4757 /*
4756 4758 * Pick up the buffer data we had previously stashed away
4757 4759 * (and now potentially also compressed).
4758 4760 */
4759 4761 buf_data = l2hdr->b_tmp_cdata;
4760 4762 buf_sz = l2hdr->b_asize;
4761 4763
4762 4764 /* Compression may have squashed the buffer to zero length. */
4763 4765 if (buf_sz != 0) {
4764 4766 uint64_t buf_p_sz;
4765 4767
4766 4768 wzio = zio_write_phys(pio, dev->l2ad_vdev,
4767 4769 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4768 4770 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4769 4771 ZIO_FLAG_CANFAIL, B_FALSE);
4770 4772
4771 4773 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4772 4774 zio_t *, wzio);
4773 4775 (void) zio_nowait(wzio);
4774 4776
4775 4777 write_asize += buf_sz;
4776 4778 /*
4777 4779 * Keep the clock hand suitably device-aligned.
4778 4780 */
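			/*
			 * Illustrative note: vdev_psize_to_asize() rounds
			 * buf_sz up to the vdev's allocation granularity,
			 * so buf_p_sz can exceed buf_sz.  This is the
			 * alignment slop that lets the device hand advance
			 * by more than the write_asize returned to the
			 * caller (see the block comment atop this function).
			 */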
4779 4781 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4780 4782 write_psize += buf_p_sz;
4781 4783 dev->l2ad_hand += buf_p_sz;
4782 4784 }
4783 4785 }
4784 4786
4785 4787 mutex_exit(&l2arc_buflist_mtx);
4786 4788
4787 4789 ASSERT3U(write_asize, <=, target_sz);
4788 4790 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4789 4791 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
4790 4792 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4791 4793 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
4792 4794 vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);
4793 4795
4794 4796 /*
4795 4797 * Bump device hand to the device start if it is approaching the end.
4796 4798 * l2arc_evict() will already have evicted ahead for this case.
4797 4799 */
4798 4800 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4799 4801 dev->l2ad_hand = dev->l2ad_start;
4800 4802 dev->l2ad_evict = dev->l2ad_start;
4801 4803 dev->l2ad_first = B_FALSE;
4802 4804 }
4803 4805
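	/*
	 * l2ad_writing brackets the window while the write zio tree is in
	 * flight.  Elsewhere in this file the ARC read path can consult it
	 * (together with the l2arc_norw tunable) so that, if configured,
	 * reads are not issued to an L2ARC device that is busy writing.
	 */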
4804 4806 dev->l2ad_writing = B_TRUE;
4805 4807 (void) zio_wait(pio);
4806 4808 dev->l2ad_writing = B_FALSE;
4807 4809
4808 4810 return (write_asize);
4809 4811 }
4810 4812
4811 4813 /*
4812 4814 * Compresses an L2ARC buffer.
4813 4815 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
4814 4816 * size in l2hdr->b_asize. This routine tries to compress the data and
4815 4817 * depending on the compression result there are three possible outcomes:
4816 4818 * *) The buffer was incompressible. The original l2hdr contents were left
4817 4819 * untouched and are ready for writing to an L2 device.
4818 4820 * *) The buffer was all-zeros, so there is no need to write it to an L2
4819 4821 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
4820 4822 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
4821 4823 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
4822 4824 * data buffer which holds the compressed data to be written, and b_asize
4823 4825 * tells us how much data there is. b_compress is set to the appropriate
4824 4826 * compression algorithm. Once writing is done, invoke
4825 4827 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
4826 4828 *
4827 4829 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
4828 4830 * buffer was incompressible).
4829 4831 */
4830 4832 static boolean_t
4831 4833 l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
4832 4834 {
4833 4835 void *cdata;
4834 4836 size_t csize, len, rounded;
4835 4837
4836 4838 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
4837 4839 ASSERT(l2hdr->b_tmp_cdata != NULL);
4838 4840
4839 4841 len = l2hdr->b_asize;
4840 4842 cdata = zio_data_buf_alloc(len);
4841 4843 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
4842 4844 cdata, l2hdr->b_asize);
4843 4845
4844 4846 rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
4845 4847 if (rounded > csize) {
4846 4848 bzero((char *)cdata + csize, rounded - csize);
4847 4849 csize = rounded;
4848 4850 }
4849 4851
4850 4852 if (csize == 0) {
4851 4853 /* zero block, indicate that there's nothing to write */
4852 4854 zio_data_buf_free(cdata, len);
4853 4855 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
4854 4856 l2hdr->b_asize = 0;
4855 4857 l2hdr->b_tmp_cdata = NULL;
4856 4858 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
4857 4859 return (B_TRUE);
4858 4860 } else if (csize > 0 && csize < len) {
4859 4861 /*
4860 4862 * Compression succeeded, we'll keep the cdata around for
4861 4863 * writing and release it afterwards.
4862 4864 */
4863 4865 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
4864 4866 l2hdr->b_asize = csize;
4865 4867 l2hdr->b_tmp_cdata = cdata;
4866 4868 ARCSTAT_BUMP(arcstat_l2_compress_successes);
4867 4869 return (B_TRUE);
4868 4870 } else {
4869 4871 /*
4870 4872 * Compression failed, release the compressed buffer.
4871 4873 * l2hdr will be left unmodified.
4872 4874 */
4873 4875 zio_data_buf_free(cdata, len);
4874 4876 ARCSTAT_BUMP(arcstat_l2_compress_failures);
4875 4877 return (B_FALSE);
4876 4878 }
4877 4879 }
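
/*
 * Illustrative summary (restating the contract above): after a call to
 * l2arc_compress_buf(), a caller can distinguish the three outcomes
 * purely from the l2hdr fields it leaves behind:
 *
 *	b_compress == ZIO_COMPRESS_EMPTY	all-zero buffer; b_asize is 0
 *						and b_tmp_cdata is NULL, so
 *						there is nothing to write
 *	b_compress == ZIO_COMPRESS_LZ4		compressed; write b_asize
 *						bytes from b_tmp_cdata, then
 *						free the temporary copy with
 *						l2arc_release_cdata_buf()
 *	b_compress == ZIO_COMPRESS_OFF		incompressible; the l2hdr is
 *						unmodified and the original
 *						buffer is written as-is
 *
 * This is exactly how l2arc_write_buffers() above consumes the result: it
 * picks up b_tmp_cdata and b_asize after the call and skips the write when
 * b_asize is zero.
 */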
4878 4880
4879 4881 /*
4880 4882 * Decompresses a zio read back from an l2arc device. On success, the
4881 4883 * underlying zio's io_data buffer is overwritten by the uncompressed
4882 4884 * version. On decompression error (corrupt compressed stream), the
4883 4885 * zio->io_error value is set to signal an I/O error.
4884 4886 *
4885 4887 * Please note that the compressed data stream is not checksummed, so
4886 4888 * if the underlying device is experiencing data corruption, we may feed
4887 4889 	 * corrupt data to the decompressor; it therefore needs to be
4888 4890 * able to handle this situation (LZ4 does).
4889 4891 */
4890 4892 static void
4891 4893 l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
4892 4894 {
4893 4895 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
4894 4896
4895 4897 if (zio->io_error != 0) {
4896 4898 /*
4897 4899 		 * An I/O error has occurred; just restore the original I/O
4898 4900 * size in preparation for a main pool read.
4899 4901 */
4900 4902 zio->io_orig_size = zio->io_size = hdr->b_size;
4901 4903 return;
4902 4904 }
4903 4905
4904 4906 if (c == ZIO_COMPRESS_EMPTY) {
4905 4907 /*
4906 4908 * An empty buffer results in a null zio, which means we
4907 4909 * need to fill its io_data after we're done restoring the
4908 4910 * buffer's contents.
4909 4911 */
4910 4912 ASSERT(hdr->b_buf != NULL);
4911 4913 bzero(hdr->b_buf->b_data, hdr->b_size);
4912 4914 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
4913 4915 } else {
4914 4916 ASSERT(zio->io_data != NULL);
4915 4917 /*
4916 4918 * We copy the compressed data from the start of the arc buffer
4917 4919 * (the zio_read will have pulled in only what we need, the
4918 4920 * rest is garbage which we will overwrite at decompression)
4919 4921 * and then decompress back to the ARC data buffer. This way we
4920 4922 * can minimize copying by simply decompressing back over the
4921 4923 * original compressed data (rather than decompressing to an
4922 4924 * aux buffer and then copying back the uncompressed buffer,
4923 4925 * which is likely to be much larger).
4924 4926 */
4925 4927 uint64_t csize;
4926 4928 void *cdata;
4927 4929
4928 4930 csize = zio->io_size;
4929 4931 cdata = zio_data_buf_alloc(csize);
4930 4932 bcopy(zio->io_data, cdata, csize);
4931 4933 if (zio_decompress_data(c, cdata, zio->io_data, csize,
4932 4934 hdr->b_size) != 0)
4933 4935 zio->io_error = EIO;
4934 4936 zio_data_buf_free(cdata, csize);
4935 4937 }
4936 4938
4937 4939 /* Restore the expected uncompressed IO size. */
4938 4940 zio->io_orig_size = zio->io_size = hdr->b_size;
4939 4941 }
4940 4942
4941 4943 /*
4942 4944 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
4943 4945 * This buffer serves as a temporary holder of compressed data while
4944 4946 * the buffer entry is being written to an l2arc device. Once that is
4945 4947 * done, we can dispose of it.
4946 4948 */
4947 4949 static void
4948 4950 l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
4949 4951 {
4950 4952 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
4951 4953
4952 4954 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
4953 4955 /*
4954 4956 * If the data was compressed, then we've allocated a
4955 4957 * temporary buffer for it, so now we need to release it.
4956 4958 */
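		/*
		 * Note: the temporary buffer was allocated with the
		 * original (uncompressed) size in l2arc_compress_buf(),
		 * before b_asize was overwritten with the compressed size,
		 * so it must be freed with ab->b_size here rather than
		 * with l2hdr->b_asize.
		 */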
4957 4959 ASSERT(l2hdr->b_tmp_cdata != NULL);
4958 4960 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
4959 4961 }
4960 4962 l2hdr->b_tmp_cdata = NULL;
4961 4963 }
4962 4964
4963 4965 /*
4964 4966 * This thread feeds the L2ARC at regular intervals. This is the beating
4965 4967 * heart of the L2ARC.
4966 4968 */
4967 4969 static void
4968 4970 l2arc_feed_thread(void)
4969 4971 {
4970 4972 callb_cpr_t cpr;
4971 4973 l2arc_dev_t *dev;
4972 4974 spa_t *spa;
4973 4975 uint64_t size, wrote;
4974 4976 clock_t begin, next = ddi_get_lbolt();
4975 4977 boolean_t headroom_boost = B_FALSE;
4976 4978
4977 4979 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4978 4980
4979 4981 mutex_enter(&l2arc_feed_thr_lock);
4980 4982
4981 4983 while (l2arc_thread_exit == 0) {
4982 4984 CALLB_CPR_SAFE_BEGIN(&cpr);
4983 4985 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4984 4986 next);
4985 4987 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4986 4988 next = ddi_get_lbolt() + hz;
4987 4989
4988 4990 /*
4989 4991 * Quick check for L2ARC devices.
4990 4992 */
4991 4993 mutex_enter(&l2arc_dev_mtx);
4992 4994 if (l2arc_ndev == 0) {
4993 4995 mutex_exit(&l2arc_dev_mtx);
4994 4996 continue;
4995 4997 }
4996 4998 mutex_exit(&l2arc_dev_mtx);
4997 4999 begin = ddi_get_lbolt();
4998 5000
4999 5001 /*
5000 5002 * This selects the next l2arc device to write to, and in
5001 5003 * doing so the next spa to feed from: dev->l2ad_spa. This
5002 5004 * will return NULL if there are now no l2arc devices or if
5003 5005 * they are all faulted.
5004 5006 *
5005 5007 * If a device is returned, its spa's config lock is also
5006 5008 * held to prevent device removal. l2arc_dev_get_next()
5007 5009 * will grab and release l2arc_dev_mtx.
5008 5010 */
5009 5011 if ((dev = l2arc_dev_get_next()) == NULL)
5010 5012 continue;
5011 5013
5012 5014 spa = dev->l2ad_spa;
5013 5015 ASSERT(spa != NULL);
5014 5016
5015 5017 /*
5016 5018 * If the pool is read-only then force the feed thread to
5017 5019 * sleep a little longer.
5018 5020 */
5019 5021 if (!spa_writeable(spa)) {
5020 5022 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5021 5023 spa_config_exit(spa, SCL_L2ARC, dev);
5022 5024 continue;
5023 5025 }
5024 5026
5025 5027 /*
5026 5028 * Avoid contributing to memory pressure.
5027 5029 */
5028 5030 if (arc_reclaim_needed()) {
5029 5031 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5030 5032 spa_config_exit(spa, SCL_L2ARC, dev);
5031 5033 continue;
5032 5034 }
5033 5035
5034 5036 ARCSTAT_BUMP(arcstat_l2_feeds);
5035 5037
5036 5038 size = l2arc_write_size();
5037 5039
5038 5040 /*
5039 5041 * Evict L2ARC buffers that will be overwritten.
5040 5042 */
5041 5043 l2arc_evict(dev, size, B_FALSE);
5042 5044
5043 5045 /*
5044 5046 * Write ARC buffers.
5045 5047 */
5046 5048 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5047 5049
5048 5050 /*
5049 5051 * Calculate interval between writes.
5050 5052 */
5051 5053 next = l2arc_write_interval(begin, size, wrote);
5052 5054 spa_config_exit(spa, SCL_L2ARC, dev);
5053 5055 }
5054 5056
5055 5057 l2arc_thread_exit = 0;
5056 5058 cv_broadcast(&l2arc_feed_thr_cv);
5057 5059 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
5058 5060 thread_exit();
5059 5061 }
5060 5062
5061 5063 boolean_t
5062 5064 l2arc_vdev_present(vdev_t *vd)
5063 5065 {
5064 5066 l2arc_dev_t *dev;
5065 5067
5066 5068 mutex_enter(&l2arc_dev_mtx);
5067 5069 for (dev = list_head(l2arc_dev_list); dev != NULL;
5068 5070 dev = list_next(l2arc_dev_list, dev)) {
5069 5071 if (dev->l2ad_vdev == vd)
5070 5072 break;
5071 5073 }
5072 5074 mutex_exit(&l2arc_dev_mtx);
5073 5075
5074 5076 return (dev != NULL);
5075 5077 }
5076 5078
5077 5079 /*
5078 5080 * Add a vdev for use by the L2ARC. By this point the spa has already
5079 5081 * validated the vdev and opened it.
5080 5082 */
5081 5083 void
5082 5084 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5083 5085 {
5084 5086 l2arc_dev_t *adddev;
5085 5087
5086 5088 ASSERT(!l2arc_vdev_present(vd));
5087 5089
5088 5090 /*
5089 5091 * Create a new l2arc device entry.
5090 5092 */
5091 5093 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5092 5094 adddev->l2ad_spa = spa;
5093 5095 adddev->l2ad_vdev = vd;
5094 5096 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5095 5097 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5096 5098 adddev->l2ad_hand = adddev->l2ad_start;
5097 5099 adddev->l2ad_evict = adddev->l2ad_start;
5098 5100 adddev->l2ad_first = B_TRUE;
5099 5101 adddev->l2ad_writing = B_FALSE;
5100 5102
5101 5103 /*
5102 5104 * This is a list of all ARC buffers that are still valid on the
5103 5105 * device.
5104 5106 */
5105 5107 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5106 5108 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5107 5109 offsetof(arc_buf_hdr_t, b_l2node));
5108 5110
5109 5111 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5110 5112
5111 5113 /*
5112 5114 * Add device to global list
5113 5115 */
5114 5116 mutex_enter(&l2arc_dev_mtx);
5115 5117 list_insert_head(l2arc_dev_list, adddev);
5116 5118 atomic_inc_64(&l2arc_ndev);
5117 5119 mutex_exit(&l2arc_dev_mtx);
5118 5120 }
5119 5121
5120 5122 /*
5121 5123 * Remove a vdev from the L2ARC.
5122 5124 */
5123 5125 void
5124 5126 l2arc_remove_vdev(vdev_t *vd)
5125 5127 {
5126 5128 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5127 5129
5128 5130 /*
5129 5131 * Find the device by vdev
5130 5132 */
5131 5133 mutex_enter(&l2arc_dev_mtx);
5132 5134 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5133 5135 nextdev = list_next(l2arc_dev_list, dev);
5134 5136 if (vd == dev->l2ad_vdev) {
5135 5137 remdev = dev;
5136 5138 break;
5137 5139 }
5138 5140 }
5139 5141 ASSERT(remdev != NULL);
5140 5142
5141 5143 /*
5142 5144 * Remove device from global list
5143 5145 */
5144 5146 list_remove(l2arc_dev_list, remdev);
5145 5147 l2arc_dev_last = NULL; /* may have been invalidated */
5146 5148 atomic_dec_64(&l2arc_ndev);
5147 5149 mutex_exit(&l2arc_dev_mtx);
5148 5150
5149 5151 /*
5150 5152 * Clear all buflists and ARC references. L2ARC device flush.
5151 5153 */
5152 5154 l2arc_evict(remdev, 0, B_TRUE);
5153 5155 list_destroy(remdev->l2ad_buflist);
5154 5156 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5155 5157 kmem_free(remdev, sizeof (l2arc_dev_t));
5156 5158 }
5157 5159
5158 5160 void
5159 5161 l2arc_init(void)
5160 5162 {
5161 5163 l2arc_thread_exit = 0;
5162 5164 l2arc_ndev = 0;
5163 5165 l2arc_writes_sent = 0;
5164 5166 l2arc_writes_done = 0;
5165 5167
5166 5168 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5167 5169 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5168 5170 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5169 5171 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5170 5172 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5171 5173
5172 5174 l2arc_dev_list = &L2ARC_dev_list;
5173 5175 l2arc_free_on_write = &L2ARC_free_on_write;
5174 5176 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5175 5177 offsetof(l2arc_dev_t, l2ad_node));
5176 5178 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5177 5179 offsetof(l2arc_data_free_t, l2df_list_node));
5178 5180 }
5179 5181
5180 5182 void
5181 5183 l2arc_fini(void)
5182 5184 {
5183 5185 /*
5184 5186 	 * This is called from dmu_fini(), which is called from spa_fini().
5185 5187 * Because of this, we can assume that all l2arc devices have
5186 5188 * already been removed when the pools themselves were removed.
5187 5189 */
5188 5190
5189 5191 l2arc_do_free_on_write();
5190 5192
5191 5193 mutex_destroy(&l2arc_feed_thr_lock);
5192 5194 cv_destroy(&l2arc_feed_thr_cv);
5193 5195 mutex_destroy(&l2arc_dev_mtx);
5194 5196 mutex_destroy(&l2arc_buflist_mtx);
5195 5197 mutex_destroy(&l2arc_free_on_write_mtx);
5196 5198
5197 5199 list_destroy(l2arc_dev_list);
5198 5200 list_destroy(l2arc_free_on_write);
5199 5201 }
5200 5202
5201 5203 void
5202 5204 l2arc_start(void)
5203 5205 {
5204 5206 if (!(spa_mode_global & FWRITE))
5205 5207 return;
5206 5208
5207 5209 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5208 5210 TS_RUN, minclsyspri);
5209 5211 }
5210 5212
5211 5213 void
5212 5214 l2arc_stop(void)
5213 5215 {
5214 5216 if (!(spa_mode_global & FWRITE))
5215 5217 return;
5216 5218
5217 5219 mutex_enter(&l2arc_feed_thr_lock);
5218 5220 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
5219 5221 l2arc_thread_exit = 1;
5220 5222 while (l2arc_thread_exit != 0)
5221 5223 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5222 5224 mutex_exit(&l2arc_feed_thr_lock);
5223 5225 }