3742 zfs comments need cleaner, more consistent style
Submitted by: Will Andrews <willa@spectralogic.com>
Submitted by: Alan Somers <alans@spectralogic.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Eric Schrock <eric.schrock@delphix.com>
--- old/usr/src/uts/common/fs/zfs/arc.c
+++ new/usr/src/uts/common/fs/zfs/arc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2013 by Delphix. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * DVA-based Adjustable Replacement Cache
29 29 *
30 30 * While much of the theory of operation used here is
31 31 * based on the self-tuning, low overhead replacement cache
32 32 * presented by Megiddo and Modha at FAST 2003, there are some
33 33 * significant differences:
34 34 *
35 35 * 1. The Megiddo and Modha model assumes any page is evictable.
36 36 * Pages in its cache cannot be "locked" into memory. This makes
37 37 * the eviction algorithm simple: evict the last page in the list.
38 38 * about. This also makes the performance characteristics easy to reason
39 39 * about. Our cache is not so simple. At any given moment, some
40 40 * subset of the blocks in the cache are un-evictable because we
41 41 * have handed out a reference to them. Blocks are only evictable
42 42 * when there are no external references active. This makes
43 43 * eviction far more problematic: we choose to evict the evictable
44 44 * blocks that are the "lowest" in the list.
45 45 *
46 46 * There are times when it is not possible to evict the requested
47 47 * space. In these circumstances we are unable to adjust the cache
48 48 * size. To prevent the cache growing unbounded at these times we
49 49 * implement a "cache throttle" that slows the flow of new data
50 50 * into the cache until we can make space available.
51 51 *
52 52 * 2. The Megiddo and Modha model assumes a fixed cache size.
53 53 * Pages are evicted when the cache is full and there is a cache
54 54 * miss. Our model has a variable sized cache. It grows with
55 55 * high use, but also tries to react to memory pressure from the
56 56 * operating system: decreasing its size when system memory is
57 57 * tight.
58 58 *
59 59 * 3. The Megiddo and Modha model assumes a fixed page size. All
60 - * elements of the cache are therefor exactly the same size. So
60 + * elements of the cache are therefore exactly the same size. So
61 61 * when adjusting the cache size following a cache miss, it's simply
62 62 * a matter of choosing a single page to evict. In our model, we
63 63 * have variable sized cache blocks (ranging from 512 bytes to
64 - * 128K bytes). We therefor choose a set of blocks to evict to make
64 + * 128K bytes). We therefore choose a set of blocks to evict to make
65 65 * space for a cache miss that approximates as closely as possible
66 66 * the space used by the new block.
67 67 *
68 68 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
69 69 * by N. Megiddo & D. Modha, FAST 2003
70 70 */
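
(For illustration only, not part of this change: a minimal sketch of
point 3's eviction strategy. have_evictable() and evict_lowest() are
hypothetical placeholders, not functions from this file.)

	/*
	 * Sketch: evict evictable blocks from the "lowest" end of the
	 * list until the space freed approximates the space needed by
	 * the new, variable-sized block.
	 */
	static int64_t
	evict_approx(arc_state_t *state, int64_t needed)
	{
		int64_t freed = 0;

		while (freed < needed && have_evictable(state))
			freed += evict_lowest(state);
		return (freed);
	}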
71 71
72 72 /*
73 73 * The locking model:
74 74 *
75 75 * A new reference to a cache buffer can be obtained in two
76 76 * ways: 1) via a hash table lookup using the DVA as a key,
77 77 * or 2) via one of the ARC lists. The arc_read() interface
78 78 * uses method 1, while the internal arc algorithms for
79 - * adjusting the cache use method 2. We therefor provide two
79 + * adjusting the cache use method 2. We therefore provide two
80 80 * types of locks: 1) the hash table lock array, and 2) the
81 81 * arc list locks.
82 82 *
83 83 * Buffers do not have their own mutexes, rather they rely on the
84 84 * hash table mutexes for the bulk of their protection (i.e. most
85 85 * fields in the arc_buf_hdr_t are protected by these mutexes).
86 86 *
87 87 * buf_hash_find() returns the appropriate mutex (held) when it
88 88 * locates the requested buffer in the hash table. It returns
89 89 * NULL for the mutex if the buffer was not in the table.
90 90 *
91 91 * buf_hash_remove() expects the appropriate hash mutex to be
92 92 * already held before it is invoked.
93 93 *
94 94 * Each arc state also has a mutex which is used to protect the
95 95 * buffer list associated with the state. When attempting to
96 96 * obtain a hash table lock while holding an arc list lock you
97 97 * must use mutex_tryenter() to avoid deadlock. Also note that
98 98 * the active state mutex must be held before the ghost state mutex.
99 99 *
100 100 * Arc buffers may have an associated eviction callback function.
101 101 * This function will be invoked prior to removing the buffer (e.g.
102 102 * in arc_do_user_evicts()). Note however that the data associated
103 103 * with the buffer may be evicted prior to the callback. The callback
104 104 * must be made with *no locks held* (to prevent deadlock). Additionally,
105 105 * the users of callbacks must ensure that their private data is
106 106 * protected from simultaneous callbacks from arc_buf_evict()
107 107 * and arc_do_user_evicts().
108 108 *
109 109 * Note that the majority of the performance stats are manipulated
110 110 * with atomic operations.
111 111 *
112 112 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
113 113 *
114 114 * - L2ARC buflist creation
115 115 * - L2ARC buflist eviction
116 116 * - L2ARC write completion, which walks L2ARC buflists
117 117 * - ARC header destruction, as it removes from L2ARC buflists
118 118 * - ARC header release, as it removes from L2ARC buflists
119 119 */
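
(Illustrative sketch of the lock-ordering rule above; the control flow
is hypothetical, but HDR_LOCK(), ARCSTAT_BUMP() and arcstat_mutex_miss
are the macros and stat defined later in this file.)

	mutex_enter(&state->arcs_mtx);		/* arc list lock */
	hash_lock = HDR_LOCK(ab);
	if (!mutex_tryenter(hash_lock)) {
		/* busy; skip the buffer rather than block and deadlock */
		ARCSTAT_BUMP(arcstat_mutex_miss);
	} else {
		/* safe to inspect or evict ab here */
		mutex_exit(hash_lock);
	}
	mutex_exit(&state->arcs_mtx);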
120 120
121 121 #include <sys/spa.h>
122 122 #include <sys/zio.h>
123 123 #include <sys/zfs_context.h>
124 124 #include <sys/arc.h>
125 125 #include <sys/refcount.h>
126 126 #include <sys/vdev.h>
127 127 #include <sys/vdev_impl.h>
128 128 #ifdef _KERNEL
129 129 #include <sys/vmsystm.h>
130 130 #include <vm/anon.h>
131 131 #include <sys/fs/swapnode.h>
132 132 #include <sys/dnlc.h>
133 133 #endif
134 134 #include <sys/callb.h>
135 135 #include <sys/kstat.h>
136 136 #include <zfs_fletcher.h>
137 137
138 138 #ifndef _KERNEL
139 139 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
140 140 boolean_t arc_watch = B_FALSE;
141 141 int arc_procfd;
142 142 #endif
143 143
144 144 static kmutex_t arc_reclaim_thr_lock;
145 145 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
146 146 static uint8_t arc_thread_exit;
147 147
148 148 extern int zfs_write_limit_shift;
149 149 extern uint64_t zfs_write_limit_max;
150 150 extern kmutex_t zfs_write_limit_lock;
151 151
152 152 #define ARC_REDUCE_DNLC_PERCENT 3
153 153 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
154 154
155 155 typedef enum arc_reclaim_strategy {
156 156 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
157 157 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
158 158 } arc_reclaim_strategy_t;
159 159
160 160 /* number of seconds before growing cache again */
161 161 static int arc_grow_retry = 60;
162 162
163 163 /* shift of arc_c for calculating both min and max arc_p */
164 164 static int arc_p_min_shift = 4;
165 165
166 166 /* log2(fraction of arc to reclaim) */
167 167 static int arc_shrink_shift = 5;
168 168
169 169 /*
170 170 * minimum lifespan of a prefetch block in clock ticks
171 171 * (initialized in arc_init())
172 172 */
173 173 static int arc_min_prefetch_lifespan;
174 174
175 175 static int arc_dead;
176 176
177 177 /*
178 178 * The arc has filled available memory and has now warmed up.
179 179 */
180 180 static boolean_t arc_warm;
181 181
182 182 /*
183 183 * These tunables are for performance analysis.
184 184 */
185 185 uint64_t zfs_arc_max;
186 186 uint64_t zfs_arc_min;
187 187 uint64_t zfs_arc_meta_limit = 0;
188 188 int zfs_arc_grow_retry = 0;
189 189 int zfs_arc_shrink_shift = 0;
190 190 int zfs_arc_p_min_shift = 0;
191 191 int zfs_disable_dup_eviction = 0;
192 192
193 193 /*
194 194 * Note that buffers can be in one of 6 states:
195 195 * ARC_anon - anonymous (discussed below)
196 196 * ARC_mru - recently used, currently cached
197 197 * ARC_mru_ghost - recently used, no longer in cache
198 198 * ARC_mfu - frequently used, currently cached
199 199 * ARC_mfu_ghost - frequently used, no longer in cache
200 200 * ARC_l2c_only - exists in L2ARC but not other states
201 201 * When there are no active references to the buffer, they are
202 202 * linked onto a list in one of these arc states. These are
203 203 * the only buffers that can be evicted or deleted. Within each
204 204 * state there are multiple lists, one for meta-data and one for
205 205 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
206 206 * etc.) is tracked separately so that it can be managed more
207 207 * explicitly: favored over data, limited explicitly.
208 208 *
209 209 * Anonymous buffers are buffers that are not associated with
210 210 * a DVA. These are buffers that hold dirty block copies
211 211 * before they are written to stable storage. By definition,
212 212 * they are "ref'd" and are considered part of arc_mru
213 213 * that cannot be freed. Generally, they will acquire a DVA
214 214 * as they are written and migrate onto the arc_mru list.
215 215 *
216 216 * The ARC_l2c_only state is for buffers that are in the second
217 217 * level ARC but no longer in any of the ARC_m* lists. The second
218 218 * level ARC itself may also contain buffers that are in any of
219 219 * the ARC_m* states - meaning that a buffer can exist in two
220 220 * places. The reason for the ARC_l2c_only state is to keep the
221 221 * buffer header in the hash table, so that reads that hit the
222 222 * second level ARC benefit from these fast lookups.
223 223 */
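
(A hedged sketch of the lifecycle described above, assuming the
header's hash lock is held; arc_change_state() is defined later in
this file, and the transitions shown are illustrative only.)

	/* a written anonymous buffer acquires a DVA and joins the MRU */
	arc_change_state(arc_mru, ab, hash_lock);
	/* a subsequent hit may promote it to the MFU */
	arc_change_state(arc_mfu, ab, hash_lock);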
224 224
225 225 typedef struct arc_state {
226 226 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */
227 227 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
228 228 uint64_t arcs_size; /* total amount of data in this state */
229 229 kmutex_t arcs_mtx;
230 230 } arc_state_t;
231 231
232 232 /* The 6 states: */
233 233 static arc_state_t ARC_anon;
234 234 static arc_state_t ARC_mru;
235 235 static arc_state_t ARC_mru_ghost;
236 236 static arc_state_t ARC_mfu;
237 237 static arc_state_t ARC_mfu_ghost;
238 238 static arc_state_t ARC_l2c_only;
239 239
240 240 typedef struct arc_stats {
241 241 kstat_named_t arcstat_hits;
242 242 kstat_named_t arcstat_misses;
243 243 kstat_named_t arcstat_demand_data_hits;
244 244 kstat_named_t arcstat_demand_data_misses;
245 245 kstat_named_t arcstat_demand_metadata_hits;
246 246 kstat_named_t arcstat_demand_metadata_misses;
247 247 kstat_named_t arcstat_prefetch_data_hits;
248 248 kstat_named_t arcstat_prefetch_data_misses;
249 249 kstat_named_t arcstat_prefetch_metadata_hits;
250 250 kstat_named_t arcstat_prefetch_metadata_misses;
251 251 kstat_named_t arcstat_mru_hits;
252 252 kstat_named_t arcstat_mru_ghost_hits;
253 253 kstat_named_t arcstat_mfu_hits;
254 254 kstat_named_t arcstat_mfu_ghost_hits;
255 255 kstat_named_t arcstat_deleted;
256 256 kstat_named_t arcstat_recycle_miss;
257 257 /*
258 258 * Number of buffers that could not be evicted because the hash lock
259 259 * was held by another thread. The lock may not necessarily be held
260 260 * by something using the same buffer, since hash locks are shared
261 261 * by multiple buffers.
262 262 */
263 263 kstat_named_t arcstat_mutex_miss;
264 264 /*
265 265 * Number of buffers skipped because they have I/O in progress, are
266 266 * indirect prefetch buffers that have not lived long enough, or are
267 267 * not from the spa we're trying to evict from.
268 268 */
269 269 kstat_named_t arcstat_evict_skip;
270 270 kstat_named_t arcstat_evict_l2_cached;
271 271 kstat_named_t arcstat_evict_l2_eligible;
272 272 kstat_named_t arcstat_evict_l2_ineligible;
273 273 kstat_named_t arcstat_hash_elements;
274 274 kstat_named_t arcstat_hash_elements_max;
275 275 kstat_named_t arcstat_hash_collisions;
276 276 kstat_named_t arcstat_hash_chains;
277 277 kstat_named_t arcstat_hash_chain_max;
278 278 kstat_named_t arcstat_p;
279 279 kstat_named_t arcstat_c;
280 280 kstat_named_t arcstat_c_min;
281 281 kstat_named_t arcstat_c_max;
282 282 kstat_named_t arcstat_size;
283 283 kstat_named_t arcstat_hdr_size;
284 284 kstat_named_t arcstat_data_size;
285 285 kstat_named_t arcstat_other_size;
286 286 kstat_named_t arcstat_l2_hits;
287 287 kstat_named_t arcstat_l2_misses;
288 288 kstat_named_t arcstat_l2_feeds;
289 289 kstat_named_t arcstat_l2_rw_clash;
290 290 kstat_named_t arcstat_l2_read_bytes;
291 291 kstat_named_t arcstat_l2_write_bytes;
292 292 kstat_named_t arcstat_l2_writes_sent;
293 293 kstat_named_t arcstat_l2_writes_done;
294 294 kstat_named_t arcstat_l2_writes_error;
295 295 kstat_named_t arcstat_l2_writes_hdr_miss;
296 296 kstat_named_t arcstat_l2_evict_lock_retry;
297 297 kstat_named_t arcstat_l2_evict_reading;
298 298 kstat_named_t arcstat_l2_free_on_write;
299 299 kstat_named_t arcstat_l2_abort_lowmem;
300 300 kstat_named_t arcstat_l2_cksum_bad;
301 301 kstat_named_t arcstat_l2_io_error;
302 302 kstat_named_t arcstat_l2_size;
303 303 kstat_named_t arcstat_l2_hdr_size;
304 304 kstat_named_t arcstat_memory_throttle_count;
305 305 kstat_named_t arcstat_duplicate_buffers;
306 306 kstat_named_t arcstat_duplicate_buffers_size;
307 307 kstat_named_t arcstat_duplicate_reads;
308 308 kstat_named_t arcstat_meta_used;
309 309 kstat_named_t arcstat_meta_limit;
310 310 kstat_named_t arcstat_meta_max;
311 311 } arc_stats_t;
312 312
313 313 static arc_stats_t arc_stats = {
314 314 { "hits", KSTAT_DATA_UINT64 },
315 315 { "misses", KSTAT_DATA_UINT64 },
316 316 { "demand_data_hits", KSTAT_DATA_UINT64 },
317 317 { "demand_data_misses", KSTAT_DATA_UINT64 },
318 318 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
319 319 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
320 320 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
321 321 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
322 322 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
323 323 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
324 324 { "mru_hits", KSTAT_DATA_UINT64 },
325 325 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
326 326 { "mfu_hits", KSTAT_DATA_UINT64 },
327 327 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
328 328 { "deleted", KSTAT_DATA_UINT64 },
329 329 { "recycle_miss", KSTAT_DATA_UINT64 },
330 330 { "mutex_miss", KSTAT_DATA_UINT64 },
331 331 { "evict_skip", KSTAT_DATA_UINT64 },
332 332 { "evict_l2_cached", KSTAT_DATA_UINT64 },
333 333 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
334 334 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
335 335 { "hash_elements", KSTAT_DATA_UINT64 },
336 336 { "hash_elements_max", KSTAT_DATA_UINT64 },
337 337 { "hash_collisions", KSTAT_DATA_UINT64 },
338 338 { "hash_chains", KSTAT_DATA_UINT64 },
339 339 { "hash_chain_max", KSTAT_DATA_UINT64 },
340 340 { "p", KSTAT_DATA_UINT64 },
341 341 { "c", KSTAT_DATA_UINT64 },
342 342 { "c_min", KSTAT_DATA_UINT64 },
343 343 { "c_max", KSTAT_DATA_UINT64 },
344 344 { "size", KSTAT_DATA_UINT64 },
345 345 { "hdr_size", KSTAT_DATA_UINT64 },
346 346 { "data_size", KSTAT_DATA_UINT64 },
347 347 { "other_size", KSTAT_DATA_UINT64 },
348 348 { "l2_hits", KSTAT_DATA_UINT64 },
349 349 { "l2_misses", KSTAT_DATA_UINT64 },
350 350 { "l2_feeds", KSTAT_DATA_UINT64 },
351 351 { "l2_rw_clash", KSTAT_DATA_UINT64 },
352 352 { "l2_read_bytes", KSTAT_DATA_UINT64 },
353 353 { "l2_write_bytes", KSTAT_DATA_UINT64 },
354 354 { "l2_writes_sent", KSTAT_DATA_UINT64 },
355 355 { "l2_writes_done", KSTAT_DATA_UINT64 },
356 356 { "l2_writes_error", KSTAT_DATA_UINT64 },
357 357 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
358 358 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
359 359 { "l2_evict_reading", KSTAT_DATA_UINT64 },
360 360 { "l2_free_on_write", KSTAT_DATA_UINT64 },
361 361 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
362 362 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
363 363 { "l2_io_error", KSTAT_DATA_UINT64 },
364 364 { "l2_size", KSTAT_DATA_UINT64 },
365 365 { "l2_hdr_size", KSTAT_DATA_UINT64 },
366 366 { "memory_throttle_count", KSTAT_DATA_UINT64 },
367 367 { "duplicate_buffers", KSTAT_DATA_UINT64 },
368 368 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
369 369 { "duplicate_reads", KSTAT_DATA_UINT64 },
370 370 { "arc_meta_used", KSTAT_DATA_UINT64 },
371 371 { "arc_meta_limit", KSTAT_DATA_UINT64 },
372 372 { "arc_meta_max", KSTAT_DATA_UINT64 }
373 373 };
374 374
375 375 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
376 376
377 377 #define ARCSTAT_INCR(stat, val) \
378 - atomic_add_64(&arc_stats.stat.value.ui64, (val));
378 + atomic_add_64(&arc_stats.stat.value.ui64, (val))
379 379
380 380 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
381 381 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
382 382
383 383 #define ARCSTAT_MAX(stat, val) { \
384 384 uint64_t m; \
385 385 while ((val) > (m = arc_stats.stat.value.ui64) && \
386 386 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
387 387 continue; \
388 388 }
389 389
390 390 #define ARCSTAT_MAXSTAT(stat) \
391 391 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
392 392
393 393 /*
394 394 * We define a macro to allow ARC hits/misses to be easily broken down by
395 395 * two separate conditions, giving a total of four different subtypes for
396 396 * each of hits and misses (so eight statistics total).
397 397 */
398 398 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
399 399 if (cond1) { \
400 400 if (cond2) { \
401 401 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
402 402 } else { \
403 403 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
404 404 } \
405 405 } else { \
406 406 if (cond2) { \
407 407 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
408 408 } else { \
409 409 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
410 410 } \
411 411 }
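
(For example, arc_buf_add_ref() later in this file uses the macro to
classify a hit by demand/prefetch and data/metadata, bumping exactly
one of four counters:)

	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
	/*
	 * Expands to a bump of one of: arcstat_demand_data_hits,
	 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
	 * arcstat_prefetch_metadata_hits.
	 */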
412 412
413 413 kstat_t *arc_ksp;
414 414 static arc_state_t *arc_anon;
415 415 static arc_state_t *arc_mru;
416 416 static arc_state_t *arc_mru_ghost;
417 417 static arc_state_t *arc_mfu;
418 418 static arc_state_t *arc_mfu_ghost;
419 419 static arc_state_t *arc_l2c_only;
420 420
421 421 /*
422 422 * There are several ARC variables that are critical to export as kstats --
423 423 * but we don't want to have to grovel around in the kstat whenever we wish to
424 424 * manipulate them. For these variables, we therefore define them to be in
425 425 * terms of the statistic variable. This assures that we are not introducing
426 426 * the possibility of inconsistency by having shadow copies of the variables,
427 427 * while still allowing the code to be readable.
428 428 */
429 429 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
430 430 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
431 431 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
432 432 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
433 433 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
434 434 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
435 435 #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */
436 436 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
437 437
438 438 static int arc_no_grow; /* Don't try to grow cache size */
439 439 static uint64_t arc_tempreserve;
440 440 static uint64_t arc_loaned_bytes;
441 441
442 442 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
443 443
444 444 typedef struct arc_callback arc_callback_t;
445 445
446 446 struct arc_callback {
447 447 void *acb_private;
448 448 arc_done_func_t *acb_done;
449 449 arc_buf_t *acb_buf;
450 450 zio_t *acb_zio_dummy;
451 451 arc_callback_t *acb_next;
452 452 };
453 453
454 454 typedef struct arc_write_callback arc_write_callback_t;
455 455
456 456 struct arc_write_callback {
457 457 void *awcb_private;
458 458 arc_done_func_t *awcb_ready;
459 459 arc_done_func_t *awcb_done;
460 460 arc_buf_t *awcb_buf;
461 461 };
462 462
463 463 struct arc_buf_hdr {
464 464 /* protected by hash lock */
465 465 dva_t b_dva;
466 466 uint64_t b_birth;
467 467 uint64_t b_cksum0;
468 468
469 469 kmutex_t b_freeze_lock;
470 470 zio_cksum_t *b_freeze_cksum;
471 471 void *b_thawed;
472 472
473 473 arc_buf_hdr_t *b_hash_next;
474 474 arc_buf_t *b_buf;
475 475 uint32_t b_flags;
476 476 uint32_t b_datacnt;
477 477
478 478 arc_callback_t *b_acb;
479 479 kcondvar_t b_cv;
480 480
481 481 /* immutable */
482 482 arc_buf_contents_t b_type;
483 483 uint64_t b_size;
484 484 uint64_t b_spa;
485 485
486 486 /* protected by arc state mutex */
487 487 arc_state_t *b_state;
488 488 list_node_t b_arc_node;
489 489
490 490 /* updated atomically */
491 491 clock_t b_arc_access;
492 492
493 493 /* self protecting */
494 494 refcount_t b_refcnt;
495 495
496 496 l2arc_buf_hdr_t *b_l2hdr;
497 497 list_node_t b_l2node;
498 498 };
499 499
500 500 static arc_buf_t *arc_eviction_list;
501 501 static kmutex_t arc_eviction_mtx;
502 502 static arc_buf_hdr_t arc_eviction_hdr;
503 503 static void arc_get_data_buf(arc_buf_t *buf);
504 504 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
505 505 static int arc_evict_needed(arc_buf_contents_t type);
506 506 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
507 507 static void arc_buf_watch(arc_buf_t *buf);
508 508
509 509 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
510 510
511 511 #define GHOST_STATE(state) \
512 512 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
513 513 (state) == arc_l2c_only)
514 514
515 515 /*
516 516 * Private ARC flags. These flags are private ARC only flags that will show up
517 517 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
518 518 * be passed in as arc_flags in things like arc_read. However, these flags
519 519 * should never be passed and should only be set by ARC code. When adding new
520 520 * public flags, make sure not to smash the private ones.
521 521 */
522 522
523 523 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
524 524 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
525 525 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
526 526 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
527 527 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
528 528 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */
529 529 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
530 530 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
531 531 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
532 532 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
533 533
534 534 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
535 535 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
536 536 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
537 537 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
538 538 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
539 539 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
540 540 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
541 541 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
542 542 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
543 543 (hdr)->b_l2hdr != NULL)
544 544 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
545 545 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
546 546 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
547 547
548 548 /*
549 549 * Other sizes
550 550 */
551 551
552 552 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
553 553 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
554 554
555 555 /*
556 556 * Hash table routines
557 557 */
558 558
559 559 #define HT_LOCK_PAD 64
560 560
561 561 struct ht_lock {
562 562 kmutex_t ht_lock;
563 563 #ifdef _KERNEL
564 564 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
565 565 #endif
566 566 };
567 567
568 568 #define BUF_LOCKS 256
569 569 typedef struct buf_hash_table {
570 570 uint64_t ht_mask;
571 571 arc_buf_hdr_t **ht_table;
572 572 struct ht_lock ht_locks[BUF_LOCKS];
573 573 } buf_hash_table_t;
574 574
575 575 static buf_hash_table_t buf_hash_table;
576 576
577 577 #define BUF_HASH_INDEX(spa, dva, birth) \
578 578 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
579 579 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
580 580 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
581 581 #define HDR_LOCK(hdr) \
582 582 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
583 583
584 584 uint64_t zfs_crc64_table[256];
585 585
586 586 /*
587 587 * Level 2 ARC
588 588 */
589 589
590 590 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
591 591 #define L2ARC_HEADROOM 2 /* num of writes */
592 592 #define L2ARC_FEED_SECS 1 /* caching interval secs */
593 593 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
594 594
595 595 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
596 596 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
597 597
598 -/*
599 - * L2ARC Performance Tunables
600 - */
598 +/* L2ARC Performance Tunables */
601 599 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
602 600 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
603 601 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
604 602 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
605 603 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
606 604 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
607 605 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
608 606 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
609 607
610 608 /*
611 609 * L2ARC Internals
612 610 */
613 611 typedef struct l2arc_dev {
614 612 vdev_t *l2ad_vdev; /* vdev */
615 613 spa_t *l2ad_spa; /* spa */
616 614 uint64_t l2ad_hand; /* next write location */
617 615 uint64_t l2ad_write; /* desired write size, bytes */
618 616 uint64_t l2ad_boost; /* warmup write boost, bytes */
619 617 uint64_t l2ad_start; /* first addr on device */
620 618 uint64_t l2ad_end; /* last addr on device */
621 619 uint64_t l2ad_evict; /* last addr eviction reached */
622 620 boolean_t l2ad_first; /* first sweep through */
623 621 boolean_t l2ad_writing; /* currently writing */
624 622 list_t *l2ad_buflist; /* buffer list */
625 623 list_node_t l2ad_node; /* device list node */
626 624 } l2arc_dev_t;
627 625
628 626 static list_t L2ARC_dev_list; /* device list */
629 627 static list_t *l2arc_dev_list; /* device list pointer */
630 628 static kmutex_t l2arc_dev_mtx; /* device list mutex */
631 629 static l2arc_dev_t *l2arc_dev_last; /* last device used */
632 630 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
633 631 static list_t L2ARC_free_on_write; /* free after write buf list */
634 632 static list_t *l2arc_free_on_write; /* free after write list ptr */
635 633 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
636 634 static uint64_t l2arc_ndev; /* number of devices */
637 635
638 636 typedef struct l2arc_read_callback {
639 637 arc_buf_t *l2rcb_buf; /* read buffer */
640 638 spa_t *l2rcb_spa; /* spa */
641 639 blkptr_t l2rcb_bp; /* original blkptr */
642 640 zbookmark_t l2rcb_zb; /* original bookmark */
643 641 int l2rcb_flags; /* original flags */
644 642 } l2arc_read_callback_t;
645 643
646 644 typedef struct l2arc_write_callback {
647 645 l2arc_dev_t *l2wcb_dev; /* device info */
648 646 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
649 647 } l2arc_write_callback_t;
650 648
651 649 struct l2arc_buf_hdr {
652 650 /* protected by arc_buf_hdr mutex */
653 651 l2arc_dev_t *b_dev; /* L2ARC device */
654 652 uint64_t b_daddr; /* disk address, offset byte */
655 653 };
656 654
657 655 typedef struct l2arc_data_free {
658 656 /* protected by l2arc_free_on_write_mtx */
659 657 void *l2df_data;
660 658 size_t l2df_size;
661 659 void (*l2df_func)(void *, size_t);
662 660 list_node_t l2df_list_node;
663 661 } l2arc_data_free_t;
664 662
665 663 static kmutex_t l2arc_feed_thr_lock;
666 664 static kcondvar_t l2arc_feed_thr_cv;
667 665 static uint8_t l2arc_thread_exit;
668 666
669 667 static void l2arc_read_done(zio_t *zio);
670 668 static void l2arc_hdr_stat_add(void);
671 669 static void l2arc_hdr_stat_remove(void);
672 670
673 671 static uint64_t
674 672 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
675 673 {
676 674 uint8_t *vdva = (uint8_t *)dva;
677 675 uint64_t crc = -1ULL;
678 676 int i;
679 677
680 678 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
681 679
682 680 for (i = 0; i < sizeof (dva_t); i++)
683 681 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
684 682
685 683 crc ^= (spa>>8) ^ birth;
686 684
687 685 return (crc);
688 686 }
689 687
690 688 #define BUF_EMPTY(buf) \
691 689 ((buf)->b_dva.dva_word[0] == 0 && \
692 690 (buf)->b_dva.dva_word[1] == 0 && \
693 691 (buf)->b_birth == 0)
694 692
695 693 #define BUF_EQUAL(spa, dva, birth, buf) \
696 694 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
697 695 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
698 696 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
699 697
700 698 static void
701 699 buf_discard_identity(arc_buf_hdr_t *hdr)
702 700 {
703 701 hdr->b_dva.dva_word[0] = 0;
704 702 hdr->b_dva.dva_word[1] = 0;
705 703 hdr->b_birth = 0;
706 704 hdr->b_cksum0 = 0;
707 705 }
708 706
709 707 static arc_buf_hdr_t *
710 708 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
711 709 {
712 710 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
713 711 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
714 712 arc_buf_hdr_t *buf;
715 713
716 714 mutex_enter(hash_lock);
717 715 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
718 716 buf = buf->b_hash_next) {
719 717 if (BUF_EQUAL(spa, dva, birth, buf)) {
720 718 *lockp = hash_lock;
721 719 return (buf);
722 720 }
723 721 }
724 722 mutex_exit(hash_lock);
725 723 *lockp = NULL;
726 724 return (NULL);
727 725 }
728 726
729 727 /*
730 728 * Insert an entry into the hash table. If there is already an element
731 729 * equal to elem in the hash table, then the already existing element
732 730 * will be returned and the new element will not be inserted.
733 731 * Otherwise returns NULL.
734 732 */
735 733 static arc_buf_hdr_t *
736 734 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
737 735 {
738 736 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
739 737 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
740 738 arc_buf_hdr_t *fbuf;
741 739 uint32_t i;
742 740
743 741 ASSERT(!HDR_IN_HASH_TABLE(buf));
744 742 *lockp = hash_lock;
745 743 mutex_enter(hash_lock);
746 744 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
747 745 fbuf = fbuf->b_hash_next, i++) {
748 746 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
749 747 return (fbuf);
750 748 }
751 749
752 750 buf->b_hash_next = buf_hash_table.ht_table[idx];
753 751 buf_hash_table.ht_table[idx] = buf;
754 752 buf->b_flags |= ARC_IN_HASH_TABLE;
755 753
756 754 /* collect some hash table performance data */
757 755 if (i > 0) {
758 756 ARCSTAT_BUMP(arcstat_hash_collisions);
759 757 if (i == 1)
760 758 ARCSTAT_BUMP(arcstat_hash_chains);
761 759
762 760 ARCSTAT_MAX(arcstat_hash_chain_max, i);
763 761 }
764 762
765 763 ARCSTAT_BUMP(arcstat_hash_elements);
766 764 ARCSTAT_MAXSTAT(arcstat_hash_elements);
767 765
768 766 return (NULL);
769 767 }
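
(A hedged usage sketch of the contract above: on return, *lockp holds
the hash lock either way, and a non-NULL result is the pre-existing
equal element.)

	fbuf = buf_hash_insert(hdr, &hash_lock);
	if (fbuf != NULL) {
		/* an equal header already exists; caller resolves it */
	}
	mutex_exit(hash_lock);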
770 768
771 769 static void
772 770 buf_hash_remove(arc_buf_hdr_t *buf)
773 771 {
774 772 arc_buf_hdr_t *fbuf, **bufp;
775 773 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
776 774
777 775 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
778 776 ASSERT(HDR_IN_HASH_TABLE(buf));
779 777
780 778 bufp = &buf_hash_table.ht_table[idx];
781 779 while ((fbuf = *bufp) != buf) {
782 780 ASSERT(fbuf != NULL);
783 781 bufp = &fbuf->b_hash_next;
784 782 }
785 783 *bufp = buf->b_hash_next;
786 784 buf->b_hash_next = NULL;
787 785 buf->b_flags &= ~ARC_IN_HASH_TABLE;
788 786
789 787 /* collect some hash table performance data */
790 788 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
791 789
792 790 if (buf_hash_table.ht_table[idx] &&
793 791 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
794 792 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
795 793 }
796 794
797 795 /*
798 796 * Global data structures and functions for the buf kmem cache.
799 797 */
800 798 static kmem_cache_t *hdr_cache;
801 799 static kmem_cache_t *buf_cache;
802 800
803 801 static void
804 802 buf_fini(void)
805 803 {
806 804 int i;
807 805
808 806 kmem_free(buf_hash_table.ht_table,
809 807 (buf_hash_table.ht_mask + 1) * sizeof (void *));
810 808 for (i = 0; i < BUF_LOCKS; i++)
811 809 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
812 810 kmem_cache_destroy(hdr_cache);
813 811 kmem_cache_destroy(buf_cache);
814 812 }
815 813
816 814 /*
817 815 * Constructor callback - called when the cache is empty
818 816 * and a new buf is requested.
819 817 */
820 818 /* ARGSUSED */
821 819 static int
822 820 hdr_cons(void *vbuf, void *unused, int kmflag)
823 821 {
824 822 arc_buf_hdr_t *buf = vbuf;
825 823
826 824 bzero(buf, sizeof (arc_buf_hdr_t));
827 825 refcount_create(&buf->b_refcnt);
828 826 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
829 827 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
830 828 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
831 829
832 830 return (0);
833 831 }
834 832
835 833 /* ARGSUSED */
836 834 static int
837 835 buf_cons(void *vbuf, void *unused, int kmflag)
838 836 {
839 837 arc_buf_t *buf = vbuf;
840 838
841 839 bzero(buf, sizeof (arc_buf_t));
842 840 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
843 841 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
844 842
845 843 return (0);
846 844 }
847 845
848 846 /*
849 847 * Destructor callback - called when a cached buf is
850 848 * no longer required.
851 849 */
852 850 /* ARGSUSED */
853 851 static void
854 852 hdr_dest(void *vbuf, void *unused)
855 853 {
856 854 arc_buf_hdr_t *buf = vbuf;
857 855
858 856 ASSERT(BUF_EMPTY(buf));
859 857 refcount_destroy(&buf->b_refcnt);
860 858 cv_destroy(&buf->b_cv);
861 859 mutex_destroy(&buf->b_freeze_lock);
862 860 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
863 861 }
864 862
865 863 /* ARGSUSED */
866 864 static void
867 865 buf_dest(void *vbuf, void *unused)
868 866 {
869 867 arc_buf_t *buf = vbuf;
870 868
871 869 mutex_destroy(&buf->b_evict_lock);
872 870 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
873 871 }
874 872
875 873 /*
876 874 * Reclaim callback -- invoked when memory is low.
877 875 */
878 876 /* ARGSUSED */
879 877 static void
880 878 hdr_recl(void *unused)
881 879 {
882 880 dprintf("hdr_recl called\n");
883 881 /*
884 882 * umem calls the reclaim func when we destroy the buf cache,
885 883 * which is after we do arc_fini().
886 884 */
887 885 if (!arc_dead)
888 886 cv_signal(&arc_reclaim_thr_cv);
889 887 }
890 888
891 889 static void
892 890 buf_init(void)
893 891 {
894 892 uint64_t *ct;
895 893 uint64_t hsize = 1ULL << 12;
896 894 int i, j;
897 895
898 896 /*
899 897 * The hash table is big enough to fill all of physical memory
900 898 * with an average 64K block size. The table will take up
901 899 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
902 900 */
903 901 while (hsize * 65536 < physmem * PAGESIZE)
904 902 hsize <<= 1;
905 903 retry:
906 904 buf_hash_table.ht_mask = hsize - 1;
907 905 buf_hash_table.ht_table =
908 906 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
909 907 if (buf_hash_table.ht_table == NULL) {
910 908 ASSERT(hsize > (1ULL << 8));
911 909 hsize >>= 1;
912 910 goto retry;
913 911 }
914 912
915 913 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
916 914 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
917 915 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
918 916 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
919 917
920 918 for (i = 0; i < 256; i++)
921 919 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
922 920 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
923 921
924 922 for (i = 0; i < BUF_LOCKS; i++) {
925 923 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
926 924 NULL, MUTEX_DEFAULT, NULL);
927 925 }
928 926 }
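
(Worked example of the sizing comment in buf_init() above: with 8 GB
of physical memory and 8-byte pointers, the loop picks the smallest
power of two such that hsize * 64K >= 8 GB, i.e. hsize = 2^17 = 131072
slots, so the table occupies 131072 * 8 bytes = 1 MB, matching the
quoted 128 KB per GB.)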
929 927
930 928 #define ARC_MINTIME (hz>>4) /* 62 ms */
931 929
932 930 static void
933 931 arc_cksum_verify(arc_buf_t *buf)
934 932 {
935 933 zio_cksum_t zc;
936 934
937 935 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
938 936 return;
939 937
940 938 mutex_enter(&buf->b_hdr->b_freeze_lock);
941 939 if (buf->b_hdr->b_freeze_cksum == NULL ||
942 940 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
943 941 mutex_exit(&buf->b_hdr->b_freeze_lock);
944 942 return;
945 943 }
946 944 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
947 945 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
948 946 panic("buffer modified while frozen!");
949 947 mutex_exit(&buf->b_hdr->b_freeze_lock);
950 948 }
951 949
952 950 static int
953 951 arc_cksum_equal(arc_buf_t *buf)
954 952 {
955 953 zio_cksum_t zc;
956 954 int equal;
957 955
958 956 mutex_enter(&buf->b_hdr->b_freeze_lock);
959 957 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
960 958 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
961 959 mutex_exit(&buf->b_hdr->b_freeze_lock);
962 960
963 961 return (equal);
964 962 }
965 963
966 964 static void
967 965 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
968 966 {
969 967 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
970 968 return;
971 969
972 970 mutex_enter(&buf->b_hdr->b_freeze_lock);
973 971 if (buf->b_hdr->b_freeze_cksum != NULL) {
974 972 mutex_exit(&buf->b_hdr->b_freeze_lock);
975 973 return;
976 974 }
977 975 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
978 976 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
979 977 buf->b_hdr->b_freeze_cksum);
980 978 mutex_exit(&buf->b_hdr->b_freeze_lock);
981 979 arc_buf_watch(buf);
982 980 }
983 981
984 982 #ifndef _KERNEL
985 983 typedef struct procctl {
986 984 long cmd;
987 985 prwatch_t prwatch;
988 986 } procctl_t;
989 987 #endif
990 988
991 989 /* ARGSUSED */
992 990 static void
993 991 arc_buf_unwatch(arc_buf_t *buf)
994 992 {
995 993 #ifndef _KERNEL
996 994 if (arc_watch) {
997 995 int result;
998 996 procctl_t ctl;
999 997 ctl.cmd = PCWATCH;
1000 998 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1001 999 ctl.prwatch.pr_size = 0;
1002 1000 ctl.prwatch.pr_wflags = 0;
1003 1001 result = write(arc_procfd, &ctl, sizeof (ctl));
1004 1002 ASSERT3U(result, ==, sizeof (ctl));
1005 1003 }
1006 1004 #endif
1007 1005 }
1008 1006
1009 1007 /* ARGSUSED */
1010 1008 static void
1011 1009 arc_buf_watch(arc_buf_t *buf)
1012 1010 {
1013 1011 #ifndef _KERNEL
1014 1012 if (arc_watch) {
1015 1013 int result;
1016 1014 procctl_t ctl;
1017 1015 ctl.cmd = PCWATCH;
1018 1016 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1019 1017 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1020 1018 ctl.prwatch.pr_wflags = WA_WRITE;
1021 1019 result = write(arc_procfd, &ctl, sizeof (ctl));
1022 1020 ASSERT3U(result, ==, sizeof (ctl));
1023 1021 }
1024 1022 #endif
1025 1023 }
1026 1024
1027 1025 void
1028 1026 arc_buf_thaw(arc_buf_t *buf)
1029 1027 {
1030 1028 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1031 1029 if (buf->b_hdr->b_state != arc_anon)
1032 1030 panic("modifying non-anon buffer!");
1033 1031 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1034 1032 panic("modifying buffer while i/o in progress!");
1035 1033 arc_cksum_verify(buf);
1036 1034 }
1037 1035
1038 1036 mutex_enter(&buf->b_hdr->b_freeze_lock);
1039 1037 if (buf->b_hdr->b_freeze_cksum != NULL) {
1040 1038 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1041 1039 buf->b_hdr->b_freeze_cksum = NULL;
1042 1040 }
1043 1041
1044 1042 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1045 1043 if (buf->b_hdr->b_thawed)
1046 1044 kmem_free(buf->b_hdr->b_thawed, 1);
1047 1045 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1048 1046 }
1049 1047
1050 1048 mutex_exit(&buf->b_hdr->b_freeze_lock);
1051 1049
1052 1050 arc_buf_unwatch(buf);
1053 1051 }
1054 1052
1055 1053 void
1056 1054 arc_buf_freeze(arc_buf_t *buf)
1057 1055 {
1058 1056 kmutex_t *hash_lock;
1059 1057
1060 1058 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1061 1059 return;
1062 1060
1063 1061 hash_lock = HDR_LOCK(buf->b_hdr);
1064 1062 mutex_enter(hash_lock);
1065 1063
1066 1064 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1067 1065 buf->b_hdr->b_state == arc_anon);
1068 1066 arc_cksum_compute(buf, B_FALSE);
1069 1067 mutex_exit(hash_lock);
1070 1068
1071 1069 }
1072 1070
1073 1071 static void
1074 1072 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1075 1073 {
1076 1074 ASSERT(MUTEX_HELD(hash_lock));
1077 1075
1078 1076 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1079 1077 (ab->b_state != arc_anon)) {
1080 1078 uint64_t delta = ab->b_size * ab->b_datacnt;
1081 1079 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1082 1080 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1083 1081
1084 1082 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1085 1083 mutex_enter(&ab->b_state->arcs_mtx);
1086 1084 ASSERT(list_link_active(&ab->b_arc_node));
1087 1085 list_remove(list, ab);
1088 1086 if (GHOST_STATE(ab->b_state)) {
1089 1087 ASSERT0(ab->b_datacnt);
1090 1088 ASSERT3P(ab->b_buf, ==, NULL);
1091 1089 delta = ab->b_size;
1092 1090 }
1093 1091 ASSERT(delta > 0);
1094 1092 ASSERT3U(*size, >=, delta);
1095 1093 atomic_add_64(size, -delta);
1096 1094 mutex_exit(&ab->b_state->arcs_mtx);
1097 1095 /* remove the prefetch flag if we get a reference */
1098 1096 if (ab->b_flags & ARC_PREFETCH)
1099 1097 ab->b_flags &= ~ARC_PREFETCH;
1100 1098 }
1101 1099 }
1102 1100
1103 1101 static int
1104 1102 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1105 1103 {
1106 1104 int cnt;
1107 1105 arc_state_t *state = ab->b_state;
1108 1106
1109 1107 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1110 1108 ASSERT(!GHOST_STATE(state));
1111 1109
1112 1110 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1113 1111 (state != arc_anon)) {
1114 1112 uint64_t *size = &state->arcs_lsize[ab->b_type];
1115 1113
1116 1114 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1117 1115 mutex_enter(&state->arcs_mtx);
1118 1116 ASSERT(!list_link_active(&ab->b_arc_node));
1119 1117 list_insert_head(&state->arcs_list[ab->b_type], ab);
1120 1118 ASSERT(ab->b_datacnt > 0);
1121 1119 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1122 1120 mutex_exit(&state->arcs_mtx);
1123 1121 }
1124 1122 return (cnt);
1125 1123 }
1126 1124
1127 1125 /*
1128 1126 * Move the supplied buffer to the indicated state. The mutex
1129 1127 * for the buffer must be held by the caller.
1130 1128 */
1131 1129 static void
1132 1130 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1133 1131 {
1134 1132 arc_state_t *old_state = ab->b_state;
1135 1133 int64_t refcnt = refcount_count(&ab->b_refcnt);
1136 1134 uint64_t from_delta, to_delta;
1137 1135
1138 1136 ASSERT(MUTEX_HELD(hash_lock));
1139 1137 ASSERT(new_state != old_state);
1140 1138 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1141 1139 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1142 1140 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1143 1141
1144 1142 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1145 1143
1146 1144 /*
1147 1145 * If this buffer is evictable, transfer it from the
1148 1146 * old state list to the new state list.
1149 1147 */
1150 1148 if (refcnt == 0) {
1151 1149 if (old_state != arc_anon) {
1152 1150 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1153 1151 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1154 1152
1155 1153 if (use_mutex)
1156 1154 mutex_enter(&old_state->arcs_mtx);
1157 1155
1158 1156 ASSERT(list_link_active(&ab->b_arc_node));
1159 1157 list_remove(&old_state->arcs_list[ab->b_type], ab);
1160 1158
1161 1159 /*
1162 1160 * If prefetching out of the ghost cache,
1163 1161 * we will have a non-zero datacnt.
1164 1162 */
1165 1163 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1166 1164 /* ghost elements have a ghost size */
1167 1165 ASSERT(ab->b_buf == NULL);
1168 1166 from_delta = ab->b_size;
1169 1167 }
1170 1168 ASSERT3U(*size, >=, from_delta);
1171 1169 atomic_add_64(size, -from_delta);
1172 1170
1173 1171 if (use_mutex)
1174 1172 mutex_exit(&old_state->arcs_mtx);
1175 1173 }
1176 1174 if (new_state != arc_anon) {
1177 1175 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1178 1176 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1179 1177
1180 1178 if (use_mutex)
1181 1179 mutex_enter(&new_state->arcs_mtx);
1182 1180
1183 1181 list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1184 1182
1185 1183 /* ghost elements have a ghost size */
1186 1184 if (GHOST_STATE(new_state)) {
1187 1185 ASSERT(ab->b_datacnt == 0);
1188 1186 ASSERT(ab->b_buf == NULL);
1189 1187 to_delta = ab->b_size;
1190 1188 }
1191 1189 atomic_add_64(size, to_delta);
1192 1190
1193 1191 if (use_mutex)
1194 1192 mutex_exit(&new_state->arcs_mtx);
1195 1193 }
1196 1194 }
1197 1195
1198 1196 ASSERT(!BUF_EMPTY(ab));
1199 1197 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1200 1198 buf_hash_remove(ab);
1201 1199
1202 1200 /* adjust state sizes */
1203 1201 if (to_delta)
1204 1202 atomic_add_64(&new_state->arcs_size, to_delta);
1205 1203 if (from_delta) {
1206 1204 ASSERT3U(old_state->arcs_size, >=, from_delta);
1207 1205 atomic_add_64(&old_state->arcs_size, -from_delta);
1208 1206 }
1209 1207 ab->b_state = new_state;
1210 1208
1211 1209 /* adjust l2arc hdr stats */
1212 1210 if (new_state == arc_l2c_only)
1213 1211 l2arc_hdr_stat_add();
1214 1212 else if (old_state == arc_l2c_only)
1215 1213 l2arc_hdr_stat_remove();
1216 1214 }
1217 1215
1218 1216 void
1219 1217 arc_space_consume(uint64_t space, arc_space_type_t type)
1220 1218 {
1221 1219 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1222 1220
1223 1221 switch (type) {
1224 1222 case ARC_SPACE_DATA:
1225 1223 ARCSTAT_INCR(arcstat_data_size, space);
1226 1224 break;
1227 1225 case ARC_SPACE_OTHER:
1228 1226 ARCSTAT_INCR(arcstat_other_size, space);
1229 1227 break;
1230 1228 case ARC_SPACE_HDRS:
1231 1229 ARCSTAT_INCR(arcstat_hdr_size, space);
1232 1230 break;
1233 1231 case ARC_SPACE_L2HDRS:
1234 1232 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1235 1233 break;
1236 1234 }
1237 1235
1238 1236 ARCSTAT_INCR(arcstat_meta_used, space);
1239 1237 atomic_add_64(&arc_size, space);
1240 1238 }
1241 1239
1242 1240 void
1243 1241 arc_space_return(uint64_t space, arc_space_type_t type)
1244 1242 {
1245 1243 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1246 1244
1247 1245 switch (type) {
1248 1246 case ARC_SPACE_DATA:
1249 1247 ARCSTAT_INCR(arcstat_data_size, -space);
1250 1248 break;
1251 1249 case ARC_SPACE_OTHER:
1252 1250 ARCSTAT_INCR(arcstat_other_size, -space);
1253 1251 break;
1254 1252 case ARC_SPACE_HDRS:
1255 1253 ARCSTAT_INCR(arcstat_hdr_size, -space);
1256 1254 break;
1257 1255 case ARC_SPACE_L2HDRS:
1258 1256 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1259 1257 break;
1260 1258 }
1261 1259
1262 1260 ASSERT(arc_meta_used >= space);
1263 1261 if (arc_meta_max < arc_meta_used)
1264 1262 arc_meta_max = arc_meta_used;
1265 1263 ARCSTAT_INCR(arcstat_meta_used, -space);
1266 1264 ASSERT(arc_size >= space);
1267 1265 atomic_add_64(&arc_size, -space);
1268 1266 }
1269 1267
1270 1268 void *
1271 1269 arc_data_buf_alloc(uint64_t size)
1272 1270 {
1273 1271 if (arc_evict_needed(ARC_BUFC_DATA))
1274 1272 cv_signal(&arc_reclaim_thr_cv);
1275 1273 atomic_add_64(&arc_size, size);
1276 1274 return (zio_data_buf_alloc(size));
1277 1275 }
1278 1276
1279 1277 void
1280 1278 arc_data_buf_free(void *buf, uint64_t size)
1281 1279 {
1282 1280 zio_data_buf_free(buf, size);
1283 1281 ASSERT(arc_size >= size);
1284 1282 atomic_add_64(&arc_size, -size);
1285 1283 }
1286 1284
1287 1285 arc_buf_t *
1288 1286 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1289 1287 {
1290 1288 arc_buf_hdr_t *hdr;
1291 1289 arc_buf_t *buf;
1292 1290
1293 1291 ASSERT3U(size, >, 0);
1294 1292 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1295 1293 ASSERT(BUF_EMPTY(hdr));
1296 1294 hdr->b_size = size;
1297 1295 hdr->b_type = type;
1298 1296 hdr->b_spa = spa_load_guid(spa);
1299 1297 hdr->b_state = arc_anon;
1300 1298 hdr->b_arc_access = 0;
1301 1299 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1302 1300 buf->b_hdr = hdr;
1303 1301 buf->b_data = NULL;
1304 1302 buf->b_efunc = NULL;
1305 1303 buf->b_private = NULL;
1306 1304 buf->b_next = NULL;
1307 1305 hdr->b_buf = buf;
1308 1306 arc_get_data_buf(buf);
1309 1307 hdr->b_datacnt = 1;
1310 1308 hdr->b_flags = 0;
1311 1309 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1312 1310 (void) refcount_add(&hdr->b_refcnt, tag);
1313 1311
1314 1312 return (buf);
1315 1313 }
1316 1314
1317 1315 static char *arc_onloan_tag = "onloan";
1318 1316
1319 1317 /*
1320 1318 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1321 1319 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1322 1320 * buffers must be returned to the arc before they can be used by the DMU or
1323 1321 * freed.
1324 1322 */
1325 1323 arc_buf_t *
1326 1324 arc_loan_buf(spa_t *spa, int size)
1327 1325 {
1328 1326 arc_buf_t *buf;
1329 1327
1330 1328 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1331 1329
1332 1330 atomic_add_64(&arc_loaned_bytes, size);
1333 1331 return (buf);
1334 1332 }
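
(A minimal usage sketch of the loan API; "tag" is whatever pointer the
caller uses for reference counting, and filling b_data stands in for
real dirty data.)

	arc_buf_t *buf = arc_loan_buf(spa, size);
	/* ... fill buf->b_data with the dirty block contents ... */
	arc_return_buf(buf, tag);	/* must return before DMU use/free */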
1335 1333
1336 1334 /*
1337 1335 * Return a loaned arc buffer to the arc.
1338 1336 */
1339 1337 void
1340 1338 arc_return_buf(arc_buf_t *buf, void *tag)
1341 1339 {
1342 1340 arc_buf_hdr_t *hdr = buf->b_hdr;
1343 1341
1344 1342 ASSERT(buf->b_data != NULL);
1345 1343 (void) refcount_add(&hdr->b_refcnt, tag);
1346 1344 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1347 1345
1348 1346 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1349 1347 }
1350 1348
1351 1349 /* Detach an arc_buf from a dbuf (tag) */
1352 1350 void
1353 1351 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1354 1352 {
1355 1353 arc_buf_hdr_t *hdr;
1356 1354
1357 1355 ASSERT(buf->b_data != NULL);
1358 1356 hdr = buf->b_hdr;
1359 1357 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1360 1358 (void) refcount_remove(&hdr->b_refcnt, tag);
1361 1359 buf->b_efunc = NULL;
1362 1360 buf->b_private = NULL;
1363 1361
1364 1362 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1365 1363 }
1366 1364
1367 1365 static arc_buf_t *
1368 1366 arc_buf_clone(arc_buf_t *from)
1369 1367 {
1370 1368 arc_buf_t *buf;
1371 1369 arc_buf_hdr_t *hdr = from->b_hdr;
1372 1370 uint64_t size = hdr->b_size;
1373 1371
1374 1372 ASSERT(hdr->b_state != arc_anon);
1375 1373
1376 1374 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1377 1375 buf->b_hdr = hdr;
1378 1376 buf->b_data = NULL;
1379 1377 buf->b_efunc = NULL;
1380 1378 buf->b_private = NULL;
1381 1379 buf->b_next = hdr->b_buf;
1382 1380 hdr->b_buf = buf;
1383 1381 arc_get_data_buf(buf);
1384 1382 bcopy(from->b_data, buf->b_data, size);
1385 1383
1386 1384 /*
1387 1385 * This buffer already exists in the arc so create a duplicate
1388 1386 * copy for the caller. If the buffer is associated with user data
1389 1387 * then track the size and number of duplicates. These stats will be
1390 1388 * updated as duplicate buffers are created and destroyed.
1391 1389 */
1392 1390 if (hdr->b_type == ARC_BUFC_DATA) {
1393 1391 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1394 1392 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1395 1393 }
1396 1394 hdr->b_datacnt += 1;
1397 1395 return (buf);
1398 1396 }
1399 1397
1400 1398 void
1401 1399 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1402 1400 {
1403 1401 arc_buf_hdr_t *hdr;
1404 1402 kmutex_t *hash_lock;
1405 1403
1406 1404 /*
1407 1405 * Check to see if this buffer is evicted. Callers
1408 1406 * must verify b_data != NULL to know if the add_ref
1409 1407 * was successful.
1410 1408 */
1411 1409 mutex_enter(&buf->b_evict_lock);
1412 1410 if (buf->b_data == NULL) {
1413 1411 mutex_exit(&buf->b_evict_lock);
1414 1412 return;
1415 1413 }
1416 1414 hash_lock = HDR_LOCK(buf->b_hdr);
1417 1415 mutex_enter(hash_lock);
1418 1416 hdr = buf->b_hdr;
1419 1417 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1420 1418 mutex_exit(&buf->b_evict_lock);
1421 1419
1422 1420 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1423 1421 add_reference(hdr, hash_lock, tag);
1424 1422 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1425 1423 arc_access(hdr, hash_lock);
1426 1424 mutex_exit(hash_lock);
1427 1425 ARCSTAT_BUMP(arcstat_hits);
1428 1426 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1429 1427 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1430 1428 data, metadata, hits);
1431 1429 }
1432 1430
1433 1431 /*
1434 1432 * Free the arc data buffer. If it is an l2arc write in progress,
1435 1433 * the buffer is placed on l2arc_free_on_write to be freed later.
1436 1434 */
1437 1435 static void
1438 1436 arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1439 1437 {
1440 1438 arc_buf_hdr_t *hdr = buf->b_hdr;
1441 1439
1442 1440 if (HDR_L2_WRITING(hdr)) {
1443 1441 l2arc_data_free_t *df;
1444 1442 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1445 1443 df->l2df_data = buf->b_data;
1446 1444 df->l2df_size = hdr->b_size;
1447 1445 df->l2df_func = free_func;
1448 1446 mutex_enter(&l2arc_free_on_write_mtx);
1449 1447 list_insert_head(l2arc_free_on_write, df);
1450 1448 mutex_exit(&l2arc_free_on_write_mtx);
1451 1449 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1452 1450 } else {
1453 1451 free_func(buf->b_data, hdr->b_size);
1454 1452 }
1455 1453 }
1456 1454
1457 1455 static void
1458 1456 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1459 1457 {
1460 1458 arc_buf_t **bufp;
1461 1459
1462 1460 /* free up data associated with the buf */
1463 1461 if (buf->b_data) {
1464 1462 arc_state_t *state = buf->b_hdr->b_state;
1465 1463 uint64_t size = buf->b_hdr->b_size;
1466 1464 arc_buf_contents_t type = buf->b_hdr->b_type;
1467 1465
1468 1466 arc_cksum_verify(buf);
1469 1467 arc_buf_unwatch(buf);
1470 1468
1471 1469 if (!recycle) {
1472 1470 if (type == ARC_BUFC_METADATA) {
1473 1471 arc_buf_data_free(buf, zio_buf_free);
1474 1472 arc_space_return(size, ARC_SPACE_DATA);
1475 1473 } else {
1476 1474 ASSERT(type == ARC_BUFC_DATA);
1477 1475 arc_buf_data_free(buf, zio_data_buf_free);
1478 1476 ARCSTAT_INCR(arcstat_data_size, -size);
1479 1477 atomic_add_64(&arc_size, -size);
1480 1478 }
1481 1479 }
1482 1480 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1483 1481 uint64_t *cnt = &state->arcs_lsize[type];
1484 1482
1485 1483 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1486 1484 ASSERT(state != arc_anon);
1487 1485
1488 1486 ASSERT3U(*cnt, >=, size);
1489 1487 atomic_add_64(cnt, -size);
1490 1488 }
1491 1489 ASSERT3U(state->arcs_size, >=, size);
1492 1490 atomic_add_64(&state->arcs_size, -size);
1493 1491 buf->b_data = NULL;
1494 1492
1495 1493 /*
1496 1494 * If we're destroying a duplicate buffer make sure
1497 1495 * that the appropriate statistics are updated.
1498 1496 */
1499 1497 if (buf->b_hdr->b_datacnt > 1 &&
1500 1498 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1501 1499 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1502 1500 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1503 1501 }
1504 1502 ASSERT(buf->b_hdr->b_datacnt > 0);
1505 1503 buf->b_hdr->b_datacnt -= 1;
1506 1504 }
1507 1505
1508 1506 /* only remove the buf if requested */
1509 1507 if (!all)
1510 1508 return;
1511 1509
1512 1510 /* remove the buf from the hdr list */
1513 1511 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1514 1512 continue;
1515 1513 *bufp = buf->b_next;
1516 1514 buf->b_next = NULL;
1517 1515
1518 1516 ASSERT(buf->b_efunc == NULL);
1519 1517
1520 1518 /* clean up the buf */
1521 1519 buf->b_hdr = NULL;
1522 1520 kmem_cache_free(buf_cache, buf);
1523 1521 }
1524 1522
1525 1523 static void
1526 1524 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1527 1525 {
1528 1526 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1529 1527 ASSERT3P(hdr->b_state, ==, arc_anon);
1530 1528 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1531 1529 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1532 1530
1533 1531 if (l2hdr != NULL) {
1534 1532 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1535 1533 /*
1536 1534 * To prevent arc_free() and l2arc_evict() from
1537 1535 * attempting to free the same buffer at the same time,
1538 1536 * a FREE_IN_PROGRESS flag is given to arc_free() to
1539 1537 * give it priority. l2arc_evict() can't destroy this
1540 1538 * header while we are waiting on l2arc_buflist_mtx.
1541 1539 *
1542 1540 * The hdr may be removed from l2ad_buflist before we
1543 1541 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1544 1542 */
1545 1543 if (!buflist_held) {
1546 1544 mutex_enter(&l2arc_buflist_mtx);
1547 1545 l2hdr = hdr->b_l2hdr;
1548 1546 }
1549 1547
1550 1548 if (l2hdr != NULL) {
1551 1549 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1552 1550 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1553 1551 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1554 1552 if (hdr->b_state == arc_l2c_only)
1555 1553 l2arc_hdr_stat_remove();
1556 1554 hdr->b_l2hdr = NULL;
1557 1555 }
1558 1556
1559 1557 if (!buflist_held)
1560 1558 mutex_exit(&l2arc_buflist_mtx);
1561 1559 }
1562 1560
1563 1561 if (!BUF_EMPTY(hdr)) {
1564 1562 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1565 1563 buf_discard_identity(hdr);
1566 1564 }
1567 1565 while (hdr->b_buf) {
1568 1566 arc_buf_t *buf = hdr->b_buf;
1569 1567
1570 1568 if (buf->b_efunc) {
1571 1569 mutex_enter(&arc_eviction_mtx);
1572 1570 mutex_enter(&buf->b_evict_lock);
1573 1571 ASSERT(buf->b_hdr != NULL);
1574 1572 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1575 1573 hdr->b_buf = buf->b_next;
1576 1574 buf->b_hdr = &arc_eviction_hdr;
1577 1575 buf->b_next = arc_eviction_list;
1578 1576 arc_eviction_list = buf;
1579 1577 mutex_exit(&buf->b_evict_lock);
1580 1578 mutex_exit(&arc_eviction_mtx);
1581 1579 } else {
1582 1580 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1583 1581 }
1584 1582 }
1585 1583 if (hdr->b_freeze_cksum != NULL) {
1586 1584 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1587 1585 hdr->b_freeze_cksum = NULL;
1588 1586 }
1589 1587 if (hdr->b_thawed) {
1590 1588 kmem_free(hdr->b_thawed, 1);
1591 1589 hdr->b_thawed = NULL;
1592 1590 }
1593 1591
1594 1592 ASSERT(!list_link_active(&hdr->b_arc_node));
1595 1593 ASSERT3P(hdr->b_hash_next, ==, NULL);
1596 1594 ASSERT3P(hdr->b_acb, ==, NULL);
1597 1595 kmem_cache_free(hdr_cache, hdr);
1598 1596 }
1599 1597
1600 1598 void
1601 1599 arc_buf_free(arc_buf_t *buf, void *tag)
1602 1600 {
1603 1601 arc_buf_hdr_t *hdr = buf->b_hdr;
1604 1602 int hashed = hdr->b_state != arc_anon;
1605 1603
1606 1604 ASSERT(buf->b_efunc == NULL);
1607 1605 ASSERT(buf->b_data != NULL);
1608 1606
1609 1607 if (hashed) {
1610 1608 kmutex_t *hash_lock = HDR_LOCK(hdr);
1611 1609
1612 1610 mutex_enter(hash_lock);
1613 1611 hdr = buf->b_hdr;
1614 1612 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1615 1613
1616 1614 (void) remove_reference(hdr, hash_lock, tag);
1617 1615 if (hdr->b_datacnt > 1) {
1618 1616 arc_buf_destroy(buf, FALSE, TRUE);
1619 1617 } else {
1620 1618 ASSERT(buf == hdr->b_buf);
1621 1619 ASSERT(buf->b_efunc == NULL);
1622 1620 hdr->b_flags |= ARC_BUF_AVAILABLE;
1623 1621 }
1624 1622 mutex_exit(hash_lock);
1625 1623 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1626 1624 int destroy_hdr;
1627 1625 /*
1628 1626 * We are in the middle of an async write. Don't destroy
1629 1627 * this buffer unless the write completes before we finish
1630 1628 * decrementing the reference count.
1631 1629 */
1632 1630 mutex_enter(&arc_eviction_mtx);
1633 1631 (void) remove_reference(hdr, NULL, tag);
1634 1632 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1635 1633 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1636 1634 mutex_exit(&arc_eviction_mtx);
1637 1635 if (destroy_hdr)
1638 1636 arc_hdr_destroy(hdr);
1639 1637 } else {
1640 1638 if (remove_reference(hdr, NULL, tag) > 0)
1641 1639 arc_buf_destroy(buf, FALSE, TRUE);
1642 1640 else
1643 1641 arc_hdr_destroy(hdr);
1644 1642 }
1645 1643 }
1646 1644
1647 1645 boolean_t
1648 1646 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1649 1647 {
1650 1648 arc_buf_hdr_t *hdr = buf->b_hdr;
1651 1649 kmutex_t *hash_lock = HDR_LOCK(hdr);
1652 1650 boolean_t no_callback = (buf->b_efunc == NULL);
1653 1651
1654 1652 if (hdr->b_state == arc_anon) {
1655 1653 ASSERT(hdr->b_datacnt == 1);
1656 1654 arc_buf_free(buf, tag);
1657 1655 return (no_callback);
1658 1656 }
1659 1657
1660 1658 mutex_enter(hash_lock);
1661 1659 hdr = buf->b_hdr;
1662 1660 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1663 1661 ASSERT(hdr->b_state != arc_anon);
1664 1662 ASSERT(buf->b_data != NULL);
1665 1663
1666 1664 (void) remove_reference(hdr, hash_lock, tag);
1667 1665 if (hdr->b_datacnt > 1) {
1668 1666 if (no_callback)
1669 1667 arc_buf_destroy(buf, FALSE, TRUE);
1670 1668 } else if (no_callback) {
1671 1669 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1672 1670 ASSERT(buf->b_efunc == NULL);
1673 1671 hdr->b_flags |= ARC_BUF_AVAILABLE;
1674 1672 }
1675 1673 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1676 1674 refcount_is_zero(&hdr->b_refcnt));
1677 1675 mutex_exit(hash_lock);
1678 1676 return (no_callback);
1679 1677 }
1680 1678
1681 1679 int
1682 1680 arc_buf_size(arc_buf_t *buf)
1683 1681 {
1684 1682 return (buf->b_hdr->b_size);
1685 1683 }
1686 1684
1687 1685 /*
1688 1686 * Called from the DMU to determine if the current buffer should be
1689 1687 * evicted. In order to ensure proper locking, the eviction must be initiated
1690 1688 * from the DMU. Return true if the buffer is associated with user data and
1691 1689 * duplicate buffers still exist.
1692 1690 */
1693 1691 boolean_t
1694 1692 arc_buf_eviction_needed(arc_buf_t *buf)
1695 1693 {
1696 1694 arc_buf_hdr_t *hdr;
1697 1695 boolean_t evict_needed = B_FALSE;
1698 1696
1699 1697 if (zfs_disable_dup_eviction)
1700 1698 return (B_FALSE);
1701 1699
1702 1700 mutex_enter(&buf->b_evict_lock);
1703 1701 hdr = buf->b_hdr;
1704 1702 if (hdr == NULL) {
1705 1703 /*
1706 1704 * We are in arc_do_user_evicts(); let that function
1707 1705 * perform the eviction.
1708 1706 */
1709 1707 ASSERT(buf->b_data == NULL);
1710 1708 mutex_exit(&buf->b_evict_lock);
1711 1709 return (B_FALSE);
1712 1710 } else if (buf->b_data == NULL) {
1713 1711 /*
1714 1712 * We have already been added to the arc eviction list;
1715 1713 * recommend eviction.
1716 1714 */
1717 1715 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1718 1716 mutex_exit(&buf->b_evict_lock);
1719 1717 return (B_TRUE);
1720 1718 }
1721 1719
1722 1720 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1723 1721 evict_needed = B_TRUE;
1724 1722
1725 1723 mutex_exit(&buf->b_evict_lock);
1726 1724 return (evict_needed);
1727 1725 }
1728 1726
1729 1727 /*
1730 1728 * Evict buffers from list until we've removed the specified number of
1731 1729 * bytes. Move the removed buffers to the appropriate evict state.
1732 1730 * If the recycle flag is set, then attempt to "recycle" a buffer:
1733 1731 * - look for a buffer to evict that is `bytes' long.
1734 1732 * - return the data block from this buffer rather than freeing it.
1735 1733 * This flag is used by callers that are trying to make space for a
1736 1734 * new buffer in a full arc cache.
1737 1735 *
1738 1736 * This function makes a "best effort". It skips over any buffers
1739 1737 * it can't get a hash_lock on, and so may not catch all candidates.
1740 1738 * It may also return without evicting as much space as requested.
1741 1739 */
1742 1740 static void *
1743 1741 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1744 1742 arc_buf_contents_t type)
1745 1743 {
1746 1744 arc_state_t *evicted_state;
1747 1745 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1748 1746 arc_buf_hdr_t *ab, *ab_prev = NULL;
1749 1747 list_t *list = &state->arcs_list[type];
1750 1748 kmutex_t *hash_lock;
1751 1749 boolean_t have_lock;
1752 1750 void *stolen = NULL;
1753 1751
1754 1752 ASSERT(state == arc_mru || state == arc_mfu);
1755 1753
1756 1754 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1757 1755
1758 1756 mutex_enter(&state->arcs_mtx);
1759 1757 mutex_enter(&evicted_state->arcs_mtx);
1760 1758
1761 1759 for (ab = list_tail(list); ab; ab = ab_prev) {
1762 1760 ab_prev = list_prev(list, ab);
1763 1761 /* prefetch buffers have a minimum lifespan */
1764 1762 if (HDR_IO_IN_PROGRESS(ab) ||
1765 1763 (spa && ab->b_spa != spa) ||
1766 1764 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1767 1765 ddi_get_lbolt() - ab->b_arc_access <
1768 1766 arc_min_prefetch_lifespan)) {
1769 1767 skipped++;
1770 1768 continue;
1771 1769 }
1772 1770 /* "lookahead" for better eviction candidate */
1773 1771 if (recycle && ab->b_size != bytes &&
1774 1772 ab_prev && ab_prev->b_size == bytes)
1775 1773 continue;
1776 1774 hash_lock = HDR_LOCK(ab);
1777 1775 have_lock = MUTEX_HELD(hash_lock);
1778 1776 if (have_lock || mutex_tryenter(hash_lock)) {
1779 1777 ASSERT0(refcount_count(&ab->b_refcnt));
1780 1778 ASSERT(ab->b_datacnt > 0);
1781 1779 while (ab->b_buf) {
1782 1780 arc_buf_t *buf = ab->b_buf;
1783 1781 if (!mutex_tryenter(&buf->b_evict_lock)) {
1784 1782 missed += 1;
1785 1783 break;
1786 1784 }
1787 1785 if (buf->b_data) {
1788 1786 bytes_evicted += ab->b_size;
1789 1787 if (recycle && ab->b_type == type &&
1790 1788 ab->b_size == bytes &&
1791 1789 !HDR_L2_WRITING(ab)) {
1792 1790 stolen = buf->b_data;
1793 1791 recycle = FALSE;
1794 1792 }
1795 1793 }
1796 1794 if (buf->b_efunc) {
1797 1795 mutex_enter(&arc_eviction_mtx);
1798 1796 arc_buf_destroy(buf,
1799 1797 buf->b_data == stolen, FALSE);
1800 1798 ab->b_buf = buf->b_next;
1801 1799 buf->b_hdr = &arc_eviction_hdr;
1802 1800 buf->b_next = arc_eviction_list;
1803 1801 arc_eviction_list = buf;
1804 1802 mutex_exit(&arc_eviction_mtx);
1805 1803 mutex_exit(&buf->b_evict_lock);
1806 1804 } else {
1807 1805 mutex_exit(&buf->b_evict_lock);
1808 1806 arc_buf_destroy(buf,
1809 1807 buf->b_data == stolen, TRUE);
1810 1808 }
1811 1809 }
1812 1810
1813 1811 if (ab->b_l2hdr) {
1814 1812 ARCSTAT_INCR(arcstat_evict_l2_cached,
1815 1813 ab->b_size);
1816 1814 } else {
1817 1815 if (l2arc_write_eligible(ab->b_spa, ab)) {
1818 1816 ARCSTAT_INCR(arcstat_evict_l2_eligible,
1819 1817 ab->b_size);
1820 1818 } else {
1821 1819 ARCSTAT_INCR(
1822 1820 arcstat_evict_l2_ineligible,
1823 1821 ab->b_size);
1824 1822 }
1825 1823 }
1826 1824
1827 1825 if (ab->b_datacnt == 0) {
1828 1826 arc_change_state(evicted_state, ab, hash_lock);
1829 1827 ASSERT(HDR_IN_HASH_TABLE(ab));
1830 1828 ab->b_flags |= ARC_IN_HASH_TABLE;
1831 1829 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1832 1830 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1833 1831 }
1834 1832 if (!have_lock)
1835 1833 mutex_exit(hash_lock);
1836 1834 if (bytes >= 0 && bytes_evicted >= bytes)
1837 1835 break;
1838 1836 } else {
1839 1837 missed += 1;
1840 1838 }
1841 1839 }
1842 1840
1843 1841 mutex_exit(&evicted_state->arcs_mtx);
1844 1842 mutex_exit(&state->arcs_mtx);
1845 1843
1846 1844 if (bytes_evicted < bytes)
1847 1845 	dprintf("only evicted %lld bytes from %p",
1848 1846 (longlong_t)bytes_evicted, state);
1849 1847
1850 1848 if (skipped)
1851 1849 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1852 1850
1853 1851 if (missed)
1854 1852 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1855 1853
1856 1854 /*
1857 1855 	 * We have just evicted some data into the ghost state; make
1858 1856 * sure we also adjust the ghost state size if necessary.
1859 1857 */
1860 1858 if (arc_no_grow &&
1861 1859 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
1862 1860 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
1863 1861 arc_mru_ghost->arcs_size - arc_c;
1864 1862
1865 1863 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
1866 1864 int64_t todelete =
1867 1865 MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
1868 1866 arc_evict_ghost(arc_mru_ghost, NULL, todelete);
1869 1867 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
1870 1868 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
1871 1869 arc_mru_ghost->arcs_size +
1872 1870 arc_mfu_ghost->arcs_size - arc_c);
1873 1871 arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
1874 1872 }
1875 1873 }
1876 1874
1877 1875 return (stolen);
1878 1876 }
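
For illustration, the ghost-list adjustment at the tail of arc_evict() can be reproduced as a standalone computation. The sketch below uses assumed sizes (plain locals stand in for the ARC state fields, and the arc_no_grow gate is omitted); it is an illustration, not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	#define	MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		/* Assumed sizes, in bytes, for illustration only. */
		int64_t arc_c = 1000;
		int64_t anon = 100, mru = 400;
		int64_t mru_ghost = 600, mfu_ghost = 500;
		int64_t mru_ghost_lsize = 550;

		if (mru_ghost + mfu_ghost > arc_c) {
			/* Same computation as the tail of arc_evict(). */
			int64_t mru_over = anon + mru + mru_ghost - arc_c;

			if (mru_over > 0 && mru_ghost_lsize > 0) {
				int64_t todelete =
				    MIN(mru_ghost_lsize, mru_over);
				printf("delete %lld bytes from the MRU "
				    "ghost list\n", (long long)todelete);
			}
		}
		return (0);
	}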
1879 1877
1880 1878 /*
1881 1879 * Remove buffers from list until we've removed the specified number of
1882 1880 * bytes. Destroy the buffers that are removed.
1883 1881 */
1884 1882 static void
1885 1883 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1886 1884 {
1887 1885 arc_buf_hdr_t *ab, *ab_prev;
1888 1886 arc_buf_hdr_t marker = { 0 };
1889 1887 list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1890 1888 kmutex_t *hash_lock;
1891 1889 uint64_t bytes_deleted = 0;
1892 1890 uint64_t bufs_skipped = 0;
1893 1891
1894 1892 ASSERT(GHOST_STATE(state));
1895 1893 top:
1896 1894 mutex_enter(&state->arcs_mtx);
1897 1895 for (ab = list_tail(list); ab; ab = ab_prev) {
1898 1896 ab_prev = list_prev(list, ab);
1899 1897 if (spa && ab->b_spa != spa)
1900 1898 continue;
1901 1899
1902 1900 /* ignore markers */
1903 1901 if (ab->b_spa == 0)
1904 1902 continue;
1905 1903
1906 1904 hash_lock = HDR_LOCK(ab);
1907 1905 /* caller may be trying to modify this buffer, skip it */
1908 1906 if (MUTEX_HELD(hash_lock))
1909 1907 continue;
1910 1908 if (mutex_tryenter(hash_lock)) {
1911 1909 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1912 1910 ASSERT(ab->b_buf == NULL);
1913 1911 ARCSTAT_BUMP(arcstat_deleted);
1914 1912 bytes_deleted += ab->b_size;
1915 1913
1916 1914 if (ab->b_l2hdr != NULL) {
1917 1915 /*
1918 1916 * This buffer is cached on the 2nd Level ARC;
1919 1917 * don't destroy the header.
1920 1918 */
1921 1919 arc_change_state(arc_l2c_only, ab, hash_lock);
1922 1920 mutex_exit(hash_lock);
1923 1921 } else {
1924 1922 arc_change_state(arc_anon, ab, hash_lock);
1925 1923 mutex_exit(hash_lock);
1926 1924 arc_hdr_destroy(ab);
1927 1925 }
1928 1926
1929 1927 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1930 1928 if (bytes >= 0 && bytes_deleted >= bytes)
1931 1929 break;
1932 1930 } else if (bytes < 0) {
1933 1931 /*
1934 1932 * Insert a list marker and then wait for the
1935 1933 			 * hash lock to become available. Once it's
1936 1934 * available, restart from where we left off.
1937 1935 */
1938 1936 list_insert_after(list, ab, &marker);
1939 1937 mutex_exit(&state->arcs_mtx);
1940 1938 mutex_enter(hash_lock);
1941 1939 mutex_exit(hash_lock);
1942 1940 mutex_enter(&state->arcs_mtx);
1943 1941 ab_prev = list_prev(list, &marker);
1944 1942 list_remove(list, &marker);
1945 1943 } else
1946 1944 bufs_skipped += 1;
1947 1945 }
1948 1946 mutex_exit(&state->arcs_mtx);
1949 1947
1950 1948 if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1951 1949 (bytes < 0 || bytes_deleted < bytes)) {
1952 1950 list = &state->arcs_list[ARC_BUFC_METADATA];
1953 1951 goto top;
1954 1952 }
1955 1953
1956 1954 if (bufs_skipped) {
1957 1955 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1958 1956 ASSERT(bytes >= 0);
1959 1957 }
1960 1958
1961 1959 if (bytes_deleted < bytes)
1962 1960 dprintf("only deleted %lld bytes from %p",
1963 1961 (longlong_t)bytes_deleted, state);
1964 1962 }
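
The marker trick used in arc_evict_ghost() (park a sentinel node in the list, drop arcs_mtx, block on the contended hash lock, then resume the walk from the sentinel) generalizes beyond the ARC. A minimal user-space sketch follows, with an assumed node type and pthread mutexes standing in for list_t and the hash locks:

	#include <stdio.h>
	#include <pthread.h>

	/* Minimal doubly linked node; markers are skipped by walkers. */
	typedef struct node {
		struct node *prev, *next;
		int is_marker;
		int payload;
	} node_t;

	static void
	insert_after(node_t *pos, node_t *n)
	{
		n->prev = pos;
		n->next = pos->next;
		if (pos->next != NULL)
			pos->next->prev = n;
		pos->next = n;
	}

	static void
	remove_node(node_t *n)
	{
		if (n->prev != NULL)
			n->prev->next = n->next;
		if (n->next != NULL)
			n->next->prev = n->prev;
		n->prev = n->next = NULL;
	}

	/*
	 * Walk tail-to-head under list_lock.  If a node's lock is busy,
	 * park a marker next to it, drop list_lock, block on the busy
	 * lock, then reacquire list_lock and resume from the marker.
	 */
	static void
	walk(node_t *tail, pthread_mutex_t *list_lock,
	    pthread_mutex_t *node_lock)
	{
		node_t marker = { 0 };
		node_t *n, *prev;

		marker.is_marker = 1;
		pthread_mutex_lock(list_lock);
		for (n = tail; n != NULL; n = prev) {
			prev = n->prev;
			if (n->is_marker)
				continue;
			if (pthread_mutex_trylock(node_lock) != 0) {
				insert_after(n, &marker);
				pthread_mutex_unlock(list_lock);
				pthread_mutex_lock(node_lock);	/* wait */
				pthread_mutex_unlock(node_lock);
				pthread_mutex_lock(list_lock);
				prev = marker.prev;	/* retry from here */
				remove_node(&marker);
				continue;
			}
			printf("visited %d\n", n->payload);
			pthread_mutex_unlock(node_lock);
		}
		pthread_mutex_unlock(list_lock);
	}

	int
	main(void)
	{
		pthread_mutex_t ll = PTHREAD_MUTEX_INITIALIZER;
		pthread_mutex_t nl = PTHREAD_MUTEX_INITIALIZER;
		node_t a = { 0 }, b = { 0 };

		a.payload = 1;
		b.payload = 2;
		insert_after(&a, &b);
		walk(&b, &ll, &nl);
		return (0);
	}

Because the marker stays linked while the list lock is dropped, a concurrent holder may remove or insert neighbors and the walk still resumes at a valid position.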
1965 1963
1966 1964 static void
1967 1965 arc_adjust(void)
1968 1966 {
1969 1967 int64_t adjustment, delta;
1970 1968
1971 1969 /*
1972 1970 * Adjust MRU size
1973 1971 */
1974 1972
1975 1973 adjustment = MIN((int64_t)(arc_size - arc_c),
1976 1974 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
1977 1975 arc_p));
1978 1976
1979 1977 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
1980 1978 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
1981 1979 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
1982 1980 adjustment -= delta;
1983 1981 }
1984 1982
1985 1983 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1986 1984 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
1987 1985 (void) arc_evict(arc_mru, NULL, delta, FALSE,
1988 1986 ARC_BUFC_METADATA);
1989 1987 }
1990 1988
1991 1989 /*
1992 1990 * Adjust MFU size
1993 1991 */
1994 1992
1995 1993 adjustment = arc_size - arc_c;
1996 1994
1997 1995 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
1998 1996 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
1999 1997 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
2000 1998 adjustment -= delta;
2001 1999 }
2002 2000
2003 2001 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2004 2002 int64_t delta = MIN(adjustment,
2005 2003 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2006 2004 (void) arc_evict(arc_mfu, NULL, delta, FALSE,
2007 2005 ARC_BUFC_METADATA);
2008 2006 }
2009 2007
2010 2008 /*
2011 2009 * Adjust ghost lists
2012 2010 */
2013 2011
2014 2012 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2015 2013
2016 2014 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2017 2015 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2018 2016 arc_evict_ghost(arc_mru_ghost, NULL, delta);
2019 2017 }
2020 2018
2021 2019 adjustment =
2022 2020 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2023 2021
2024 2022 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2025 2023 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2026 2024 arc_evict_ghost(arc_mfu_ghost, NULL, delta);
2027 2025 }
2028 2026 }
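
For concreteness, the MRU leg of arc_adjust() reduces to the arithmetic below. This is a standalone sketch with assumed sizes, not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	#define	MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		/* Assumed sizes in bytes, chosen for illustration only. */
		int64_t arc_size = 1200, arc_c = 1000, arc_p = 500;
		int64_t anon = 100, mru = 600, meta_used = 50;
		int64_t mru_data_lsize = 300, mru_meta_lsize = 200;

		int64_t adjustment = MIN(arc_size - arc_c,
		    anon + mru + meta_used - arc_p);

		if (adjustment > 0 && mru_data_lsize > 0) {
			int64_t delta = MIN(mru_data_lsize, adjustment);
			printf("evict %lld bytes of MRU data\n",
			    (long long)delta);
			adjustment -= delta;
		}
		if (adjustment > 0 && mru_meta_lsize > 0) {
			int64_t delta = MIN(mru_meta_lsize, adjustment);
			printf("evict %lld bytes of MRU metadata\n",
			    (long long)delta);
		}
		return (0);
	}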
2029 2027
2030 2028 static void
2031 2029 arc_do_user_evicts(void)
2032 2030 {
2033 2031 mutex_enter(&arc_eviction_mtx);
2034 2032 while (arc_eviction_list != NULL) {
2035 2033 arc_buf_t *buf = arc_eviction_list;
2036 2034 arc_eviction_list = buf->b_next;
2037 2035 mutex_enter(&buf->b_evict_lock);
2038 2036 buf->b_hdr = NULL;
2039 2037 mutex_exit(&buf->b_evict_lock);
2040 2038 mutex_exit(&arc_eviction_mtx);
2041 2039
2042 2040 if (buf->b_efunc != NULL)
2043 2041 VERIFY(buf->b_efunc(buf) == 0);
2044 2042
2045 2043 buf->b_efunc = NULL;
2046 2044 buf->b_private = NULL;
2047 2045 kmem_cache_free(buf_cache, buf);
2048 2046 mutex_enter(&arc_eviction_mtx);
2049 2047 }
2050 2048 mutex_exit(&arc_eviction_mtx);
2051 2049 }
2052 2050
2053 2051 /*
2054 2052 * Flush all *evictable* data from the cache for the given spa.
2055 2053 * NOTE: this will not touch "active" (i.e. referenced) data.
2056 2054 */
2057 2055 void
2058 2056 arc_flush(spa_t *spa)
2059 2057 {
2060 2058 uint64_t guid = 0;
2061 2059
2062 2060 if (spa)
2063 2061 guid = spa_load_guid(spa);
2064 2062
2065 2063 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
2066 2064 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2067 2065 if (spa)
2068 2066 break;
2069 2067 }
2070 2068 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
2071 2069 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2072 2070 if (spa)
2073 2071 break;
2074 2072 }
2075 2073 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
2076 2074 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2077 2075 if (spa)
2078 2076 break;
2079 2077 }
2080 2078 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
2081 2079 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2082 2080 if (spa)
2083 2081 break;
2084 2082 }
2085 2083
2086 2084 arc_evict_ghost(arc_mru_ghost, guid, -1);
2087 2085 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2088 2086
2089 2087 mutex_enter(&arc_reclaim_thr_lock);
2090 2088 arc_do_user_evicts();
2091 2089 mutex_exit(&arc_reclaim_thr_lock);
2092 2090 ASSERT(spa || arc_eviction_list == NULL);
2093 2091 }
2094 2092
2095 2093 void
2096 2094 arc_shrink(void)
2097 2095 {
2098 2096 if (arc_c > arc_c_min) {
2099 2097 uint64_t to_free;
2100 2098
2101 2099 #ifdef _KERNEL
2102 2100 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
2103 2101 #else
2104 2102 to_free = arc_c >> arc_shrink_shift;
2105 2103 #endif
2106 2104 if (arc_c > arc_c_min + to_free)
2107 2105 atomic_add_64(&arc_c, -to_free);
2108 2106 else
2109 2107 arc_c = arc_c_min;
2110 2108
2111 2109 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2112 2110 if (arc_c > arc_size)
2113 2111 arc_c = MAX(arc_size, arc_c_min);
2114 2112 if (arc_p > arc_c)
2115 2113 arc_p = (arc_c >> 1);
2116 2114 ASSERT(arc_c >= arc_c_min);
2117 2115 ASSERT((int64_t)arc_p >= 0);
2118 2116 }
2119 2117
2120 2118 if (arc_size > arc_c)
2121 2119 arc_adjust();
2122 2120 }
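
The shrink step itself is a small calculation: release 1/2^arc_shrink_shift of the current target, but never drop below arc_c_min. A sketch, assuming a shift of 5 (so 1/32 of the target is released per step; the kernel path also factors in ptob(needfree), omitted here):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		/* Assumed values for illustration. */
		uint64_t arc_c = 1ULL << 30;		/* 1 GB target */
		uint64_t arc_c_min = 1ULL << 26;	/* 64 MB floor */
		int arc_shrink_shift = 5;

		uint64_t to_free = arc_c >> arc_shrink_shift;
		if (arc_c > arc_c_min + to_free)
			arc_c -= to_free;
		else
			arc_c = arc_c_min;
		printf("released %llu bytes of target; new arc_c = %llu\n",
		    (unsigned long long)to_free, (unsigned long long)arc_c);
		return (0);
	}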
2123 2121
2124 2122 /*
2125 2123 * Determine if the system is under memory pressure and is asking
2126 2124 * to reclaim memory. A return value of 1 indicates that the system
2127 2125 * is under memory pressure and that the arc should adjust accordingly.
2128 2126 */
2129 2127 static int
2130 2128 arc_reclaim_needed(void)
2131 2129 {
2132 2130 uint64_t extra;
2133 2131
2134 2132 #ifdef _KERNEL
2135 2133
2136 2134 if (needfree)
2137 2135 return (1);
2138 2136
2139 2137 /*
2140 2138 	 * take 'desfree' extra pages, so we reclaim sooner rather than later
2141 2139 */
2142 2140 extra = desfree;
2143 2141
2144 2142 /*
2145 2143 * check that we're out of range of the pageout scanner. It starts to
2146 2144 	 * schedule paging if freemem is less than lotsfree plus needfree.
2147 2145 * lotsfree is the high-water mark for pageout, and needfree is the
2148 2146 * number of needed free pages. We add extra pages here to make sure
2149 2147 * the scanner doesn't start up while we're freeing memory.
2150 2148 */
2151 2149 if (freemem < lotsfree + needfree + extra)
2152 2150 return (1);
2153 2151
2154 2152 /*
2155 2153 * check to make sure that swapfs has enough space so that anon
2156 2154 	 * reservations can still succeed. anon_resvmem() checks that
2157 2155 	 * availrmem is greater than swapfs_minfree plus the number of reserved
2158 2156 * swap pages. We also add a bit of extra here just to prevent
2159 2157 * circumstances from getting really dire.
2160 2158 */
2161 2159 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2162 2160 return (1);
2163 2161
2164 2162 #if defined(__i386)
2165 2163 /*
2166 2164 * If we're on an i386 platform, it's possible that we'll exhaust the
2167 2165 * kernel heap space before we ever run out of available physical
2168 2166 * memory. Most checks of the size of the heap_area compare against
2169 2167 * tune.t_minarmem, which is the minimum available real memory that we
2170 2168 * can have in the system. However, this is generally fixed at 25 pages
2171 2169 * which is so low that it's useless. In this comparison, we seek to
2172 2170 	 * calculate the total heap size and reclaim if more than 3/4 of the
2173 2171 	 * heap is allocated (or, equivalently, if less than 1/4 of the heap
2174 2172 	 * is free).
2175 2173 */
2176 2174 if (vmem_size(heap_arena, VMEM_FREE) <
2177 2175 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
2178 2176 return (1);
2179 2177 #endif
2180 2178
2181 2179 /*
2182 2180 * If zio data pages are being allocated out of a separate heap segment,
2183 2181 * then enforce that the size of available vmem for this arena remains
2184 2182 * above about 1/16th free.
2185 2183 *
2186 2184 * Note: The 1/16th arena free requirement was put in place
2187 2185 * to aggressively evict memory from the arc in order to avoid
2188 2186 * memory fragmentation issues.
2189 2187 */
2190 2188 if (zio_arena != NULL &&
2191 2189 vmem_size(zio_arena, VMEM_FREE) <
2192 2190 (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2193 2191 return (1);
2194 2192 #else
2195 2193 if (spa_get_random(100) == 0)
2196 2194 return (1);
2197 2195 #endif
2198 2196 return (0);
2199 2197 }
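
Stripped of the platform details, the core of arc_reclaim_needed() is a threshold predicate. The sketch below is a user-space caricature with assumed page counts; the swapfs, i386 heap, and zio arena checks are omitted:

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Reclaim when free memory dips under the pageout thresholds
	 * plus a safety margin.  All inputs are assumed page counts.
	 */
	static int
	reclaim_needed(uint64_t freemem, uint64_t lotsfree,
	    uint64_t needfree, uint64_t desfree)
	{
		uint64_t extra = desfree;	/* reclaim sooner */

		if (needfree != 0)
			return (1);
		if (freemem < lotsfree + needfree + extra)
			return (1);
		return (0);
	}

	int
	main(void)
	{
		/* 900 < 800 + 0 + 200, so the margin triggers reclaim. */
		printf("%d\n", reclaim_needed(900, 800, 0, 200));
		return (0);
	}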
2200 2198
2201 2199 static void
2202 2200 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2203 2201 {
2204 2202 size_t i;
2205 2203 kmem_cache_t *prev_cache = NULL;
2206 2204 kmem_cache_t *prev_data_cache = NULL;
2207 2205 extern kmem_cache_t *zio_buf_cache[];
2208 2206 extern kmem_cache_t *zio_data_buf_cache[];
2209 2207
2210 2208 #ifdef _KERNEL
2211 2209 if (arc_meta_used >= arc_meta_limit) {
2212 2210 /*
2213 2211 * We are exceeding our meta-data cache limit.
2214 2212 * Purge some DNLC entries to release holds on meta-data.
2215 2213 */
2216 2214 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2217 2215 }
2218 2216 #if defined(__i386)
2219 2217 /*
2220 2218 * Reclaim unused memory from all kmem caches.
2221 2219 */
2222 2220 kmem_reap();
2223 2221 #endif
2224 2222 #endif
2225 2223
2226 2224 /*
2227 2225 * An aggressive reclamation will shrink the cache size as well as
2228 2226 * reap free buffers from the arc kmem caches.
2229 2227 */
2230 2228 if (strat == ARC_RECLAIM_AGGR)
2231 2229 arc_shrink();
2232 2230
2233 2231 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2234 2232 if (zio_buf_cache[i] != prev_cache) {
2235 2233 prev_cache = zio_buf_cache[i];
2236 2234 kmem_cache_reap_now(zio_buf_cache[i]);
2237 2235 }
2238 2236 if (zio_data_buf_cache[i] != prev_data_cache) {
2239 2237 prev_data_cache = zio_data_buf_cache[i];
2240 2238 kmem_cache_reap_now(zio_data_buf_cache[i]);
2241 2239 }
2242 2240 }
2243 2241 kmem_cache_reap_now(buf_cache);
2244 2242 kmem_cache_reap_now(hdr_cache);
2245 2243
2246 2244 /*
2247 2245 	 * Ask the vmem arena to reclaim unused memory from its
2248 2246 * quantum caches.
2249 2247 */
2250 2248 if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2251 2249 vmem_qcache_reap(zio_arena);
2252 2250 }
2253 2251
2254 2252 static void
2255 2253 arc_reclaim_thread(void)
2256 2254 {
2257 2255 clock_t growtime = 0;
2258 2256 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2259 2257 callb_cpr_t cpr;
2260 2258
2261 2259 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2262 2260
2263 2261 mutex_enter(&arc_reclaim_thr_lock);
2264 2262 while (arc_thread_exit == 0) {
2265 2263 if (arc_reclaim_needed()) {
2266 2264
2267 2265 if (arc_no_grow) {
2268 2266 if (last_reclaim == ARC_RECLAIM_CONS) {
2269 2267 last_reclaim = ARC_RECLAIM_AGGR;
2270 2268 } else {
2271 2269 last_reclaim = ARC_RECLAIM_CONS;
2272 2270 }
2273 2271 } else {
2274 2272 arc_no_grow = TRUE;
2275 2273 last_reclaim = ARC_RECLAIM_AGGR;
2276 2274 membar_producer();
2277 2275 }
2278 2276
2279 2277 /* reset the growth delay for every reclaim */
2280 2278 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2281 2279
2282 2280 arc_kmem_reap_now(last_reclaim);
2283 2281 arc_warm = B_TRUE;
2284 2282
2285 2283 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2286 2284 arc_no_grow = FALSE;
2287 2285 }
2288 2286
2289 2287 arc_adjust();
2290 2288
2291 2289 if (arc_eviction_list != NULL)
2292 2290 arc_do_user_evicts();
2293 2291
2294 2292 /* block until needed, or one second, whichever is shorter */
2295 2293 CALLB_CPR_SAFE_BEGIN(&cpr);
2296 2294 (void) cv_timedwait(&arc_reclaim_thr_cv,
2297 2295 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2298 2296 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2299 2297 }
2300 2298
2301 2299 arc_thread_exit = 0;
2302 2300 cv_broadcast(&arc_reclaim_thr_cv);
2303 2301 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2304 2302 thread_exit();
2305 2303 }
2306 2304
2307 2305 /*
2308 2306 * Adapt arc info given the number of bytes we are trying to add and
2309 2307  * the state that we are coming from. This function is only called
2310 2308 * when we are adding new content to the cache.
2311 2309 */
2312 2310 static void
2313 2311 arc_adapt(int bytes, arc_state_t *state)
2314 2312 {
2315 2313 int mult;
2316 2314 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2317 2315
2318 2316 if (state == arc_l2c_only)
2319 2317 return;
2320 2318
2321 2319 ASSERT(bytes > 0);
2322 2320 /*
2323 2321 * Adapt the target size of the MRU list:
2324 2322 * - if we just hit in the MRU ghost list, then increase
2325 2323 * the target size of the MRU list.
2326 2324 * - if we just hit in the MFU ghost list, then increase
2327 2325 * the target size of the MFU list by decreasing the
2328 2326 * target size of the MRU list.
2329 2327 */
2330 2328 if (state == arc_mru_ghost) {
2331 2329 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2332 2330 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2333 2331 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2334 2332
2335 2333 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2336 2334 } else if (state == arc_mfu_ghost) {
2337 2335 uint64_t delta;
2338 2336
2339 2337 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2340 2338 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2341 2339 mult = MIN(mult, 10);
2342 2340
2343 2341 delta = MIN(bytes * mult, arc_p);
2344 2342 arc_p = MAX(arc_p_min, arc_p - delta);
2345 2343 }
2346 2344 ASSERT((int64_t)arc_p >= 0);
2347 2345
2348 2346 if (arc_reclaim_needed()) {
2349 2347 cv_signal(&arc_reclaim_thr_cv);
2350 2348 return;
2351 2349 }
2352 2350
2353 2351 if (arc_no_grow)
2354 2352 return;
2355 2353
2356 2354 if (arc_c >= arc_c_max)
2357 2355 return;
2358 2356
2359 2357 /*
2360 2358 * If we're within (2 * maxblocksize) bytes of the target
2361 2359 * cache size, increment the target cache size
2362 2360 	 * cache size, increment the target cache size.
2363 2361 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2364 2362 atomic_add_64(&arc_c, (int64_t)bytes);
2365 2363 if (arc_c > arc_c_max)
2366 2364 arc_c = arc_c_max;
2367 2365 else if (state == arc_anon)
2368 2366 atomic_add_64(&arc_p, (int64_t)bytes);
2369 2367 if (arc_p > arc_c)
2370 2368 arc_p = arc_c;
2371 2369 }
2372 2370 ASSERT((int64_t)arc_p >= 0);
2373 2371 }
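
The ghost-hit adaptation above can be exercised in isolation. A standalone sketch with assumed values (the names mirror the ARC variables but are plain locals here, and arc_p_min_shift is assumed to be 4):

	#include <stdio.h>
	#include <stdint.h>

	#define	MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		/* Assumed values for illustration. */
		uint64_t arc_c = 1ULL << 20, arc_p = 1ULL << 19;
		uint64_t arc_p_min = arc_c >> 4;
		uint64_t mru_ghost_size = 1000, mfu_ghost_size = 3000;
		uint64_t bytes = 8192;

		/* A hit in the MRU ghost list grows the MRU target. */
		uint64_t mult = (mru_ghost_size >= mfu_ghost_size) ? 1 :
		    (mfu_ghost_size / mru_ghost_size);
		mult = MIN(mult, 10);	/* avoid wild arc_p adjustment */
		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
		printf("mult = %llu, new arc_p = %llu\n",
		    (unsigned long long)mult, (unsigned long long)arc_p);
		return (0);
	}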
2374 2372
2375 2373 /*
2376 2374 * Check if the cache has reached its limits and eviction is required
2377 2375  * prior to insertion.
2378 2376 */
2379 2377 static int
2380 2378 arc_evict_needed(arc_buf_contents_t type)
2381 2379 {
2382 2380 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2383 2381 return (1);
2384 2382
2385 2383 if (arc_reclaim_needed())
2386 2384 return (1);
2387 2385
2388 2386 return (arc_size > arc_c);
2389 2387 }
2390 2388
2391 2389 /*
2392 2390 * The buffer, supplied as the first argument, needs a data block.
2393 2391 * So, if we are at cache max, determine which cache should be victimized.
2394 2392 * We have the following cases:
2395 2393 *
2396 2394 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2397 2395 * In this situation if we're out of space, but the resident size of the MFU is
2398 2396 * under the limit, victimize the MFU cache to satisfy this insertion request.
2399 2397 *
2400 2398 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2401 2399 * Here, we've used up all of the available space for the MRU, so we need to
2402 2400 * evict from our own cache instead. Evict from the set of resident MRU
2403 2401 * entries.
2404 2402 *
2405 2403 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2406 2404 * c minus p represents the MFU space in the cache, since p is the size of the
2407 2405 * cache that is dedicated to the MRU. In this situation there's still space on
2408 2406 * the MFU side, so the MRU side needs to be victimized.
2409 2407 *
2410 2408 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2411 2409 * MFU's resident set is consuming more space than it has been allotted. In
2412 2410 * this situation, we must victimize our own cache, the MFU, for this insertion.
2413 2411 */
2414 2412 static void
2415 2413 arc_get_data_buf(arc_buf_t *buf)
2416 2414 {
2417 2415 arc_state_t *state = buf->b_hdr->b_state;
2418 2416 uint64_t size = buf->b_hdr->b_size;
2419 2417 arc_buf_contents_t type = buf->b_hdr->b_type;
2420 2418
2421 2419 arc_adapt(size, state);
2422 2420
2423 2421 /*
2424 2422 	 * We have not yet reached the maximum cache size;
2425 2423 * just allocate a new buffer.
2426 2424 */
2427 2425 if (!arc_evict_needed(type)) {
2428 2426 if (type == ARC_BUFC_METADATA) {
2429 2427 buf->b_data = zio_buf_alloc(size);
2430 2428 arc_space_consume(size, ARC_SPACE_DATA);
2431 2429 } else {
2432 2430 ASSERT(type == ARC_BUFC_DATA);
2433 2431 buf->b_data = zio_data_buf_alloc(size);
2434 2432 ARCSTAT_INCR(arcstat_data_size, size);
2435 2433 atomic_add_64(&arc_size, size);
2436 2434 }
2437 2435 goto out;
2438 2436 }
2439 2437
2440 2438 /*
2441 2439 * If we are prefetching from the mfu ghost list, this buffer
2442 2440 * will end up on the mru list; so steal space from there.
2443 2441 */
2444 2442 if (state == arc_mfu_ghost)
2445 2443 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2446 2444 else if (state == arc_mru_ghost)
2447 2445 state = arc_mru;
2448 2446
2449 2447 if (state == arc_mru || state == arc_anon) {
2450 2448 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2451 2449 state = (arc_mfu->arcs_lsize[type] >= size &&
2452 2450 arc_p > mru_used) ? arc_mfu : arc_mru;
2453 2451 } else {
2454 2452 /* MFU cases */
2455 2453 uint64_t mfu_space = arc_c - arc_p;
2456 2454 state = (arc_mru->arcs_lsize[type] >= size &&
2457 2455 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2458 2456 }
2459 2457 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2460 2458 if (type == ARC_BUFC_METADATA) {
2461 2459 buf->b_data = zio_buf_alloc(size);
2462 2460 arc_space_consume(size, ARC_SPACE_DATA);
2463 2461 } else {
2464 2462 ASSERT(type == ARC_BUFC_DATA);
2465 2463 buf->b_data = zio_data_buf_alloc(size);
2466 2464 ARCSTAT_INCR(arcstat_data_size, size);
2467 2465 atomic_add_64(&arc_size, size);
2468 2466 }
2469 2467 ARCSTAT_BUMP(arcstat_recycle_miss);
2470 2468 }
2471 2469 ASSERT(buf->b_data != NULL);
2472 2470 out:
2473 2471 /*
2474 2472 * Update the state size. Note that ghost states have a
2475 2473 * "ghost size" and so don't need to be updated.
2476 2474 */
2477 2475 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2478 2476 arc_buf_hdr_t *hdr = buf->b_hdr;
2479 2477
2480 2478 atomic_add_64(&hdr->b_state->arcs_size, size);
2481 2479 if (list_link_active(&hdr->b_arc_node)) {
2482 2480 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2483 2481 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2484 2482 }
2485 2483 /*
2486 2484 * If we are growing the cache, and we are adding anonymous
2487 2485 		 * data, and we have outgrown arc_p, update arc_p.
2488 2486 */
2489 2487 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2490 2488 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2491 2489 arc_p = MIN(arc_c, arc_p + size);
2492 2490 }
2493 2491 }
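
The four victimization cases described in the block comment above arc_get_data_buf() reduce to one small decision function. The sketch below is an illustration with assumed inputs, not the kernel's implementation, and it ignores the ghost-state remapping that precedes the choice:

	#include <stdio.h>
	#include <stdint.h>

	typedef enum { VICTIM_MRU, VICTIM_MFU } victim_t;

	/*
	 * Decide which list to victimize for an insert of `size' bytes,
	 * mirroring cases 1-4 above.  All sizes are assumed inputs.
	 */
	static victim_t
	choose_victim(int inserting_for_mru, uint64_t size, uint64_t c,
	    uint64_t p, uint64_t anon_size, uint64_t mru_size,
	    uint64_t mru_evictable, uint64_t mfu_size,
	    uint64_t mfu_evictable)
	{
		if (inserting_for_mru) {
			/* Cases 1 and 2. */
			uint64_t mru_used = anon_size + mru_size;
			return ((mfu_evictable >= size && p > mru_used) ?
			    VICTIM_MFU : VICTIM_MRU);
		}
		/* Cases 3 and 4: c - p is the space dedicated to MFU. */
		return ((mru_evictable >= size && (c - p) > mfu_size) ?
		    VICTIM_MRU : VICTIM_MFU);
	}

	int
	main(void)
	{
		victim_t v = choose_victim(1, 8192, 1000000, 600000,
		    100000, 300000, 250000, 500000, 400000);
		printf("victimize %s\n", v == VICTIM_MFU ? "MFU" : "MRU");
		return (0);
	}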
2494 2492
2495 2493 /*
2496 2494 * This routine is called whenever a buffer is accessed.
2497 2495 * NOTE: the hash lock is dropped in this function.
2498 2496 */
2499 2497 static void
2500 2498 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2501 2499 {
2502 2500 clock_t now;
2503 2501
2504 2502 ASSERT(MUTEX_HELD(hash_lock));
2505 2503
2506 2504 if (buf->b_state == arc_anon) {
2507 2505 /*
2508 2506 * This buffer is not in the cache, and does not
2509 2507 * appear in our "ghost" list. Add the new buffer
2510 2508 * to the MRU state.
2511 2509 */
2512 2510
2513 2511 ASSERT(buf->b_arc_access == 0);
2514 2512 buf->b_arc_access = ddi_get_lbolt();
2515 2513 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2516 2514 arc_change_state(arc_mru, buf, hash_lock);
2517 2515
2518 2516 } else if (buf->b_state == arc_mru) {
2519 2517 now = ddi_get_lbolt();
2520 2518
2521 2519 /*
2522 2520 * If this buffer is here because of a prefetch, then either:
2523 2521 * - clear the flag if this is a "referencing" read
2524 2522 * (any subsequent access will bump this into the MFU state).
2525 2523 * or
2526 2524 * - move the buffer to the head of the list if this is
2527 2525 * another prefetch (to make it less likely to be evicted).
2528 2526 */
2529 2527 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2530 2528 if (refcount_count(&buf->b_refcnt) == 0) {
2531 2529 ASSERT(list_link_active(&buf->b_arc_node));
2532 2530 } else {
2533 2531 buf->b_flags &= ~ARC_PREFETCH;
2534 2532 ARCSTAT_BUMP(arcstat_mru_hits);
2535 2533 }
2536 2534 buf->b_arc_access = now;
2537 2535 return;
2538 2536 }
2539 2537
2540 2538 /*
2541 2539 * This buffer has been "accessed" only once so far,
2542 2540 * but it is still in the cache. Move it to the MFU
2543 2541 		 * state once ARC_MINTIME has elapsed since instantiation.
2544 2542 */
2545 2543 if (now > buf->b_arc_access + ARC_MINTIME) {
2546 2544 /*
2547 2545 * More than 125ms have passed since we
2548 2546 * instantiated this buffer. Move it to the
2549 2547 * most frequently used state.
2550 2548 */
2551 2549 buf->b_arc_access = now;
2552 2550 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2553 2551 arc_change_state(arc_mfu, buf, hash_lock);
2554 2552 }
2555 2553 ARCSTAT_BUMP(arcstat_mru_hits);
2556 2554 } else if (buf->b_state == arc_mru_ghost) {
2557 2555 arc_state_t *new_state;
2558 2556 /*
2559 2557 * This buffer has been "accessed" recently, but
2560 2558 * was evicted from the cache. Move it to the
2561 2559 * MFU state.
2562 2560 */
2563 2561
2564 2562 if (buf->b_flags & ARC_PREFETCH) {
2565 2563 new_state = arc_mru;
2566 2564 if (refcount_count(&buf->b_refcnt) > 0)
2567 2565 buf->b_flags &= ~ARC_PREFETCH;
2568 2566 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2569 2567 } else {
2570 2568 new_state = arc_mfu;
2571 2569 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2572 2570 }
2573 2571
2574 2572 buf->b_arc_access = ddi_get_lbolt();
2575 2573 arc_change_state(new_state, buf, hash_lock);
2576 2574
2577 2575 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2578 2576 } else if (buf->b_state == arc_mfu) {
2579 2577 /*
2580 2578 * This buffer has been accessed more than once and is
2581 2579 * still in the cache. Keep it in the MFU state.
2582 2580 *
2583 2581 * NOTE: an add_reference() that occurred when we did
2584 2582 * the arc_read() will have kicked this off the list.
2585 2583 * If it was a prefetch, we will explicitly move it to
2586 2584 * the head of the list now.
2587 2585 */
2588 2586 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2589 2587 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2590 2588 ASSERT(list_link_active(&buf->b_arc_node));
2591 2589 }
2592 2590 ARCSTAT_BUMP(arcstat_mfu_hits);
2593 2591 buf->b_arc_access = ddi_get_lbolt();
2594 2592 } else if (buf->b_state == arc_mfu_ghost) {
2595 2593 arc_state_t *new_state = arc_mfu;
2596 2594 /*
2597 2595 * This buffer has been accessed more than once but has
2598 2596 * been evicted from the cache. Move it back to the
2599 2597 * MFU state.
2600 2598 */
2601 2599
2602 2600 if (buf->b_flags & ARC_PREFETCH) {
2603 2601 /*
2604 2602 * This is a prefetch access...
2605 2603 * move this block back to the MRU state.
2606 2604 */
2607 2605 ASSERT0(refcount_count(&buf->b_refcnt));
2608 2606 new_state = arc_mru;
2609 2607 }
2610 2608
2611 2609 buf->b_arc_access = ddi_get_lbolt();
2612 2610 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2613 2611 arc_change_state(new_state, buf, hash_lock);
2614 2612
2615 2613 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2616 2614 } else if (buf->b_state == arc_l2c_only) {
2617 2615 /*
2618 2616 * This buffer is on the 2nd Level ARC.
2619 2617 */
2620 2618
2621 2619 buf->b_arc_access = ddi_get_lbolt();
2622 2620 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2623 2621 arc_change_state(arc_mfu, buf, hash_lock);
2624 2622 } else {
2625 2623 ASSERT(!"invalid arc state");
2626 2624 }
2627 2625 }
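
Ignoring reference counts, locking, and statistics, arc_access() implements a small state machine. A distilled sketch; the `recent' flag below stands in for the ARC_MINTIME check and `prefetch' for ARC_PREFETCH:

	#include <stdio.h>

	typedef enum { ST_ANON, ST_MRU, ST_MRU_GHOST, ST_MFU,
	    ST_MFU_GHOST, ST_L2C_ONLY } st_t;

	/*
	 * Next-state function distilled from arc_access(); `recent'
	 * means the buffer was last touched within ARC_MINTIME.
	 */
	static st_t
	next_state(st_t cur, int prefetch, int recent)
	{
		switch (cur) {
		case ST_ANON:
			return (ST_MRU);	/* first sighting */
		case ST_MRU:
			return ((prefetch || recent) ? ST_MRU : ST_MFU);
		case ST_MRU_GHOST:
			return (prefetch ? ST_MRU : ST_MFU);
		case ST_MFU:
			return (ST_MFU);
		case ST_MFU_GHOST:
			return (prefetch ? ST_MRU : ST_MFU);
		case ST_L2C_ONLY:
			return (ST_MFU);
		}
		return (cur);
	}

	int
	main(void)
	{
		printf("MRU + second demand access -> %s\n",
		    next_state(ST_MRU, 0, 0) == ST_MFU ? "MFU" : "MRU");
		return (0);
	}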
2628 2626
2629 2627 /* a generic arc_done_func_t which you can use */
2630 2628 /* ARGSUSED */
2631 2629 void
2632 2630 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2633 2631 {
2634 2632 if (zio == NULL || zio->io_error == 0)
2635 2633 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2636 2634 VERIFY(arc_buf_remove_ref(buf, arg));
2637 2635 }
2638 2636
2639 2637 /* a generic arc_done_func_t */
2640 2638 void
2641 2639 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2642 2640 {
2643 2641 arc_buf_t **bufp = arg;
2644 2642 if (zio && zio->io_error) {
2645 2643 VERIFY(arc_buf_remove_ref(buf, arg));
2646 2644 *bufp = NULL;
2647 2645 } else {
2648 2646 *bufp = buf;
2649 2647 ASSERT(buf->b_data);
2650 2648 }
2651 2649 }
2652 2650
2653 2651 static void
2654 2652 arc_read_done(zio_t *zio)
2655 2653 {
2656 2654 arc_buf_hdr_t *hdr, *found;
2657 2655 arc_buf_t *buf;
2658 2656 arc_buf_t *abuf; /* buffer we're assigning to callback */
2659 2657 kmutex_t *hash_lock;
2660 2658 arc_callback_t *callback_list, *acb;
2661 2659 int freeable = FALSE;
2662 2660
2663 2661 buf = zio->io_private;
2664 2662 hdr = buf->b_hdr;
2665 2663
2666 2664 /*
2667 2665 	 * The hdr was inserted into the hash table and removed from lists
2668 2666 * prior to starting I/O. We should find this header, since
2669 2667 * it's in the hash table, and it should be legit since it's
2670 2668 * not possible to evict it during the I/O. The only possible
2671 2669 * reason for it not to be found is if we were freed during the
2672 2670 * read.
2673 2671 */
2674 2672 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2675 2673 &hash_lock);
2676 2674
2677 2675 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2678 2676 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2679 2677 (found == hdr && HDR_L2_READING(hdr)));
2680 2678
2681 2679 hdr->b_flags &= ~ARC_L2_EVICTED;
2682 2680 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2683 2681 hdr->b_flags &= ~ARC_L2CACHE;
2684 2682
2685 2683 /* byteswap if necessary */
2686 2684 callback_list = hdr->b_acb;
2687 2685 ASSERT(callback_list != NULL);
2688 2686 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2689 2687 dmu_object_byteswap_t bswap =
2690 2688 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2691 2689 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2692 2690 byteswap_uint64_array :
2693 2691 dmu_ot_byteswap[bswap].ob_func;
2694 2692 func(buf->b_data, hdr->b_size);
2695 2693 }
2696 2694
2697 2695 arc_cksum_compute(buf, B_FALSE);
2698 2696 arc_buf_watch(buf);
2699 2697
2700 2698 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2701 2699 /*
2702 2700 * Only call arc_access on anonymous buffers. This is because
2703 2701 * if we've issued an I/O for an evicted buffer, we've already
2704 2702 * called arc_access (to prevent any simultaneous readers from
2705 2703 * getting confused).
2706 2704 */
2707 2705 arc_access(hdr, hash_lock);
2708 2706 }
2709 2707
2710 2708 /* create copies of the data buffer for the callers */
2711 2709 abuf = buf;
2712 2710 for (acb = callback_list; acb; acb = acb->acb_next) {
2713 2711 if (acb->acb_done) {
2714 2712 if (abuf == NULL) {
2715 2713 ARCSTAT_BUMP(arcstat_duplicate_reads);
2716 2714 abuf = arc_buf_clone(buf);
2717 2715 }
2718 2716 acb->acb_buf = abuf;
2719 2717 abuf = NULL;
2720 2718 }
2721 2719 }
2722 2720 hdr->b_acb = NULL;
2723 2721 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2724 2722 ASSERT(!HDR_BUF_AVAILABLE(hdr));
2725 2723 if (abuf == buf) {
2726 2724 ASSERT(buf->b_efunc == NULL);
2727 2725 ASSERT(hdr->b_datacnt == 1);
2728 2726 hdr->b_flags |= ARC_BUF_AVAILABLE;
2729 2727 }
2730 2728
2731 2729 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2732 2730
2733 2731 if (zio->io_error != 0) {
2734 2732 hdr->b_flags |= ARC_IO_ERROR;
2735 2733 if (hdr->b_state != arc_anon)
2736 2734 arc_change_state(arc_anon, hdr, hash_lock);
2737 2735 if (HDR_IN_HASH_TABLE(hdr))
2738 2736 buf_hash_remove(hdr);
2739 2737 freeable = refcount_is_zero(&hdr->b_refcnt);
2740 2738 }
2741 2739
2742 2740 /*
2743 2741 * Broadcast before we drop the hash_lock to avoid the possibility
2744 2742 * that the hdr (and hence the cv) might be freed before we get to
2745 2743 * the cv_broadcast().
2746 2744 */
2747 2745 cv_broadcast(&hdr->b_cv);
2748 2746
2749 2747 if (hash_lock) {
2750 2748 mutex_exit(hash_lock);
2751 2749 } else {
2752 2750 /*
2753 2751 * This block was freed while we waited for the read to
2754 2752 * complete. It has been removed from the hash table and
2755 2753 * moved to the anonymous state (so that it won't show up
2756 2754 * in the cache).
2757 2755 */
2758 2756 ASSERT3P(hdr->b_state, ==, arc_anon);
2759 2757 freeable = refcount_is_zero(&hdr->b_refcnt);
2760 2758 }
2761 2759
2762 2760 /* execute each callback and free its structure */
2763 2761 while ((acb = callback_list) != NULL) {
2764 2762 if (acb->acb_done)
2765 2763 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2766 2764
2767 2765 if (acb->acb_zio_dummy != NULL) {
2768 2766 acb->acb_zio_dummy->io_error = zio->io_error;
2769 2767 zio_nowait(acb->acb_zio_dummy);
2770 2768 }
2771 2769
2772 2770 callback_list = acb->acb_next;
2773 2771 kmem_free(acb, sizeof (arc_callback_t));
2774 2772 }
2775 2773
2776 2774 if (freeable)
2777 2775 arc_hdr_destroy(hdr);
2778 2776 }
2779 2777
2780 2778 /*
2781 2779 * "Read" the block at the specified DVA (in bp) via the
2782 2780 * cache. If the block is found in the cache, invoke the provided
2783 2781 * callback immediately and return. Note that the `zio' parameter
2784 2782 * in the callback will be NULL in this case, since no IO was
2785 2783  * required. If the block is not in the cache, pass the read request
2786 2784 * on to the spa with a substitute callback function, so that the
2787 2785 * requested block will be added to the cache.
2788 2786 *
2789 2787 * If a read request arrives for a block that has a read in-progress,
2790 2788 * either wait for the in-progress read to complete (and return the
2791 2789 * results); or, if this is a read with a "done" func, add a record
2792 2790 * to the read to invoke the "done" func when the read completes,
2793 2791 * and return; or just return.
2794 2792 *
2795 2793 * arc_read_done() will invoke all the requested "done" functions
2796 2794 * for readers of this block.
2797 2795 */
2798 2796 int
2799 2797 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
2800 2798 void *private, int priority, int zio_flags, uint32_t *arc_flags,
2801 2799 const zbookmark_t *zb)
2802 2800 {
2803 2801 arc_buf_hdr_t *hdr;
2804 2802 arc_buf_t *buf = NULL;
2805 2803 kmutex_t *hash_lock;
2806 2804 zio_t *rzio;
2807 2805 uint64_t guid = spa_load_guid(spa);
2808 2806
2809 2807 top:
2810 2808 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2811 2809 &hash_lock);
2812 2810 if (hdr && hdr->b_datacnt > 0) {
2813 2811
2814 2812 *arc_flags |= ARC_CACHED;
2815 2813
2816 2814 if (HDR_IO_IN_PROGRESS(hdr)) {
2817 2815
2818 2816 if (*arc_flags & ARC_WAIT) {
2819 2817 cv_wait(&hdr->b_cv, hash_lock);
2820 2818 mutex_exit(hash_lock);
2821 2819 goto top;
2822 2820 }
2823 2821 ASSERT(*arc_flags & ARC_NOWAIT);
2824 2822
2825 2823 if (done) {
2826 2824 arc_callback_t *acb = NULL;
2827 2825
2828 2826 acb = kmem_zalloc(sizeof (arc_callback_t),
2829 2827 KM_SLEEP);
2830 2828 acb->acb_done = done;
2831 2829 acb->acb_private = private;
2832 2830 if (pio != NULL)
2833 2831 acb->acb_zio_dummy = zio_null(pio,
2834 2832 spa, NULL, NULL, NULL, zio_flags);
2835 2833
2836 2834 ASSERT(acb->acb_done != NULL);
2837 2835 acb->acb_next = hdr->b_acb;
2838 2836 hdr->b_acb = acb;
2839 2837 add_reference(hdr, hash_lock, private);
2840 2838 mutex_exit(hash_lock);
2841 2839 return (0);
2842 2840 }
2843 2841 mutex_exit(hash_lock);
2844 2842 return (0);
2845 2843 }
2846 2844
2847 2845 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2848 2846
2849 2847 if (done) {
2850 2848 add_reference(hdr, hash_lock, private);
2851 2849 /*
2852 2850 * If this block is already in use, create a new
2853 2851 * copy of the data so that we will be guaranteed
2854 2852 * that arc_release() will always succeed.
2855 2853 */
2856 2854 buf = hdr->b_buf;
2857 2855 ASSERT(buf);
2858 2856 ASSERT(buf->b_data);
2859 2857 if (HDR_BUF_AVAILABLE(hdr)) {
2860 2858 ASSERT(buf->b_efunc == NULL);
2861 2859 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2862 2860 } else {
2863 2861 buf = arc_buf_clone(buf);
2864 2862 }
2865 2863
2866 2864 } else if (*arc_flags & ARC_PREFETCH &&
2867 2865 refcount_count(&hdr->b_refcnt) == 0) {
2868 2866 hdr->b_flags |= ARC_PREFETCH;
2869 2867 }
2870 2868 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2871 2869 arc_access(hdr, hash_lock);
2872 2870 if (*arc_flags & ARC_L2CACHE)
2873 2871 hdr->b_flags |= ARC_L2CACHE;
2874 2872 mutex_exit(hash_lock);
2875 2873 ARCSTAT_BUMP(arcstat_hits);
2876 2874 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2877 2875 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2878 2876 data, metadata, hits);
2879 2877
2880 2878 if (done)
2881 2879 done(NULL, buf, private);
2882 2880 } else {
2883 2881 uint64_t size = BP_GET_LSIZE(bp);
2884 2882 arc_callback_t *acb;
2885 2883 vdev_t *vd = NULL;
2886 2884 uint64_t addr = 0;
2887 2885 boolean_t devw = B_FALSE;
2888 2886
2889 2887 if (hdr == NULL) {
2890 2888 /* this block is not in the cache */
2891 2889 arc_buf_hdr_t *exists;
2892 2890 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2893 2891 buf = arc_buf_alloc(spa, size, private, type);
2894 2892 hdr = buf->b_hdr;
2895 2893 hdr->b_dva = *BP_IDENTITY(bp);
2896 2894 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2897 2895 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2898 2896 exists = buf_hash_insert(hdr, &hash_lock);
2899 2897 if (exists) {
2900 2898 /* somebody beat us to the hash insert */
2901 2899 mutex_exit(hash_lock);
2902 2900 buf_discard_identity(hdr);
2903 2901 (void) arc_buf_remove_ref(buf, private);
2904 2902 goto top; /* restart the IO request */
2905 2903 }
2906 2904 /* if this is a prefetch, we don't have a reference */
2907 2905 if (*arc_flags & ARC_PREFETCH) {
2908 2906 (void) remove_reference(hdr, hash_lock,
2909 2907 private);
2910 2908 hdr->b_flags |= ARC_PREFETCH;
2911 2909 }
2912 2910 if (*arc_flags & ARC_L2CACHE)
2913 2911 hdr->b_flags |= ARC_L2CACHE;
2914 2912 if (BP_GET_LEVEL(bp) > 0)
2915 2913 hdr->b_flags |= ARC_INDIRECT;
2916 2914 } else {
2917 2915 /* this block is in the ghost cache */
2918 2916 ASSERT(GHOST_STATE(hdr->b_state));
2919 2917 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2920 2918 ASSERT0(refcount_count(&hdr->b_refcnt));
2921 2919 ASSERT(hdr->b_buf == NULL);
2922 2920
2923 2921 /* if this is a prefetch, we don't have a reference */
2924 2922 if (*arc_flags & ARC_PREFETCH)
2925 2923 hdr->b_flags |= ARC_PREFETCH;
2926 2924 else
2927 2925 add_reference(hdr, hash_lock, private);
2928 2926 if (*arc_flags & ARC_L2CACHE)
2929 2927 hdr->b_flags |= ARC_L2CACHE;
2930 2928 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2931 2929 buf->b_hdr = hdr;
2932 2930 buf->b_data = NULL;
2933 2931 buf->b_efunc = NULL;
2934 2932 buf->b_private = NULL;
2935 2933 buf->b_next = NULL;
2936 2934 hdr->b_buf = buf;
2937 2935 ASSERT(hdr->b_datacnt == 0);
2938 2936 hdr->b_datacnt = 1;
2939 2937 arc_get_data_buf(buf);
2940 2938 arc_access(hdr, hash_lock);
2941 2939 }
2942 2940
2943 2941 ASSERT(!GHOST_STATE(hdr->b_state));
2944 2942
2945 2943 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2946 2944 acb->acb_done = done;
2947 2945 acb->acb_private = private;
2948 2946
2949 2947 ASSERT(hdr->b_acb == NULL);
2950 2948 hdr->b_acb = acb;
2951 2949 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2952 2950
2953 2951 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2954 2952 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2955 2953 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2956 2954 addr = hdr->b_l2hdr->b_daddr;
2957 2955 /*
2958 2956 * Lock out device removal.
2959 2957 */
2960 2958 if (vdev_is_dead(vd) ||
2961 2959 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2962 2960 vd = NULL;
2963 2961 }
2964 2962
2965 2963 mutex_exit(hash_lock);
2966 2964
2967 2965 /*
2968 2966 * At this point, we have a level 1 cache miss. Try again in
2969 2967 * L2ARC if possible.
2970 2968 */
2971 2969 ASSERT3U(hdr->b_size, ==, size);
2972 2970 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
2973 2971 uint64_t, size, zbookmark_t *, zb);
2974 2972 ARCSTAT_BUMP(arcstat_misses);
2975 2973 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2976 2974 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2977 2975 data, metadata, misses);
2978 2976
2979 2977 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
2980 2978 /*
2981 2979 * Read from the L2ARC if the following are true:
2982 2980 * 1. The L2ARC vdev was previously cached.
2983 2981 * 2. This buffer still has L2ARC metadata.
2984 2982 * 3. This buffer isn't currently writing to the L2ARC.
2985 2983 * 4. The L2ARC entry wasn't evicted, which may
2986 2984 * also have invalidated the vdev.
2987 2985 			 * 5. This isn't a prefetch while l2arc_noprefetch is enabled.
2988 2986 */
2989 2987 if (hdr->b_l2hdr != NULL &&
2990 2988 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
2991 2989 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
2992 2990 l2arc_read_callback_t *cb;
2993 2991
2994 2992 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2995 2993 ARCSTAT_BUMP(arcstat_l2_hits);
2996 2994
2997 2995 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2998 2996 KM_SLEEP);
2999 2997 cb->l2rcb_buf = buf;
3000 2998 cb->l2rcb_spa = spa;
3001 2999 cb->l2rcb_bp = *bp;
3002 3000 cb->l2rcb_zb = *zb;
3003 3001 cb->l2rcb_flags = zio_flags;
3004 3002
3005 3003 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3006 3004 addr + size < vd->vdev_psize -
3007 3005 VDEV_LABEL_END_SIZE);
3008 3006
3009 3007 /*
3010 3008 * l2arc read. The SCL_L2ARC lock will be
3011 3009 * released by l2arc_read_done().
3012 3010 */
3013 3011 rzio = zio_read_phys(pio, vd, addr, size,
3014 3012 buf->b_data, ZIO_CHECKSUM_OFF,
3015 3013 l2arc_read_done, cb, priority, zio_flags |
3016 3014 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
3017 3015 ZIO_FLAG_DONT_PROPAGATE |
3018 3016 ZIO_FLAG_DONT_RETRY, B_FALSE);
3019 3017 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3020 3018 zio_t *, rzio);
3021 3019 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
3022 3020
3023 3021 if (*arc_flags & ARC_NOWAIT) {
3024 3022 zio_nowait(rzio);
3025 3023 return (0);
3026 3024 }
3027 3025
3028 3026 ASSERT(*arc_flags & ARC_WAIT);
3029 3027 if (zio_wait(rzio) == 0)
3030 3028 return (0);
3031 3029
3032 3030 /* l2arc read error; goto zio_read() */
3033 3031 } else {
3034 3032 DTRACE_PROBE1(l2arc__miss,
3035 3033 arc_buf_hdr_t *, hdr);
3036 3034 ARCSTAT_BUMP(arcstat_l2_misses);
3037 3035 if (HDR_L2_WRITING(hdr))
3038 3036 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3039 3037 spa_config_exit(spa, SCL_L2ARC, vd);
3040 3038 }
3041 3039 } else {
3042 3040 if (vd != NULL)
3043 3041 spa_config_exit(spa, SCL_L2ARC, vd);
3044 3042 if (l2arc_ndev != 0) {
3045 3043 DTRACE_PROBE1(l2arc__miss,
3046 3044 arc_buf_hdr_t *, hdr);
3047 3045 ARCSTAT_BUMP(arcstat_l2_misses);
3048 3046 }
3049 3047 }
3050 3048
3051 3049 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3052 3050 arc_read_done, buf, priority, zio_flags, zb);
3053 3051
3054 3052 if (*arc_flags & ARC_WAIT)
3055 3053 return (zio_wait(rzio));
3056 3054
3057 3055 ASSERT(*arc_flags & ARC_NOWAIT);
3058 3056 zio_nowait(rzio);
3059 3057 }
3060 3058 return (0);
3061 3059 }
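
The read-through protocol described in the block comment above arc_read() (cache hit: invoke the callback immediately; miss with a read in flight: piggyback a callback record; otherwise: start the I/O) can be sketched independently of ZFS. Every type and name below is an assumption for illustration:

	#include <stdio.h>
	#include <stdlib.h>

	typedef void done_fn_t(void *data, void *private);

	typedef struct callback {
		done_fn_t *cb_done;
		void *cb_private;
		struct callback *cb_next;
	} callback_t;

	typedef struct hdr {
		int h_cached;		/* data already resident */
		int h_io_in_progress;	/* a read is already in flight */
		void *h_data;
		callback_t *h_acb;	/* callbacks run at completion */
	} hdr_t;

	static void
	cache_read(hdr_t *hdr, done_fn_t *done, void *private)
	{
		if (hdr->h_cached) {
			/* Hit: call back now; no I/O was required. */
			done(hdr->h_data, private);
			return;
		}
		if (hdr->h_io_in_progress) {
			/* Piggyback on the in-flight read. */
			callback_t *acb = calloc(1, sizeof (*acb));
			if (acb == NULL)
				return;
			acb->cb_done = done;
			acb->cb_private = private;
			acb->cb_next = hdr->h_acb;
			hdr->h_acb = acb;
			return;
		}
		hdr->h_io_in_progress = 1;
		/* ... issue the read; completion walks h_acb ... */
	}

	static void
	print_done(void *data, void *private)
	{
		(void) data;
		printf("read complete: %s\n", (char *)private);
	}

	int
	main(void)
	{
		static char blk[512];
		hdr_t h = { 0 };

		h.h_cached = 1;
		h.h_data = blk;
		cache_read(&h, print_done, "hello");
		return (0);
	}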
3062 3060
3063 3061 void
3064 3062 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3065 3063 {
3066 3064 ASSERT(buf->b_hdr != NULL);
3067 3065 ASSERT(buf->b_hdr->b_state != arc_anon);
3068 3066 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3069 3067 ASSERT(buf->b_efunc == NULL);
3070 3068 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3071 3069
3072 3070 buf->b_efunc = func;
3073 3071 buf->b_private = private;
3074 3072 }
3075 3073
3076 3074 /*
3077 3075 * This is used by the DMU to let the ARC know that a buffer is
3078 3076 * being evicted, so the ARC should clean up. If this arc buf
3079 3077 * is not yet in the evicted state, it will be put there.
3080 3078 */
3081 3079 int
3082 3080 arc_buf_evict(arc_buf_t *buf)
3083 3081 {
3084 3082 arc_buf_hdr_t *hdr;
3085 3083 kmutex_t *hash_lock;
3086 3084 arc_buf_t **bufp;
3087 3085
3088 3086 mutex_enter(&buf->b_evict_lock);
3089 3087 hdr = buf->b_hdr;
3090 3088 if (hdr == NULL) {
3091 3089 /*
3092 3090 * We are in arc_do_user_evicts().
3093 3091 */
3094 3092 ASSERT(buf->b_data == NULL);
3095 3093 mutex_exit(&buf->b_evict_lock);
3096 3094 return (0);
3097 3095 } else if (buf->b_data == NULL) {
3098 3096 arc_buf_t copy = *buf; /* structure assignment */
3099 3097 /*
3100 3098 * We are on the eviction list; process this buffer now
3101 3099 * but let arc_do_user_evicts() do the reaping.
3102 3100 */
3103 3101 buf->b_efunc = NULL;
3104 3102 mutex_exit(&buf->b_evict_lock);
3105 3103 		VERIFY(copy.b_efunc(&copy) == 0);
3106 3104 return (1);
3107 3105 }
3108 3106 hash_lock = HDR_LOCK(hdr);
3109 3107 mutex_enter(hash_lock);
3110 3108 hdr = buf->b_hdr;
3111 3109 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3112 3110
3113 3111 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3114 3112 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3115 3113
3116 3114 /*
3117 3115 * Pull this buffer off the hdr
3118 3116 */
3119 3117 bufp = &hdr->b_buf;
3120 3118 while (*bufp != buf)
3121 3119 bufp = &(*bufp)->b_next;
3122 3120 *bufp = buf->b_next;
3123 3121
3124 3122 ASSERT(buf->b_data != NULL);
3125 3123 arc_buf_destroy(buf, FALSE, FALSE);
3126 3124
3127 3125 if (hdr->b_datacnt == 0) {
3128 3126 arc_state_t *old_state = hdr->b_state;
3129 3127 arc_state_t *evicted_state;
3130 3128
3131 3129 ASSERT(hdr->b_buf == NULL);
3132 3130 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3133 3131
3134 3132 evicted_state =
3135 3133 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3136 3134
3137 3135 mutex_enter(&old_state->arcs_mtx);
3138 3136 mutex_enter(&evicted_state->arcs_mtx);
3139 3137
3140 3138 arc_change_state(evicted_state, hdr, hash_lock);
3141 3139 ASSERT(HDR_IN_HASH_TABLE(hdr));
3142 3140 hdr->b_flags |= ARC_IN_HASH_TABLE;
3143 3141 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3144 3142
3145 3143 mutex_exit(&evicted_state->arcs_mtx);
3146 3144 mutex_exit(&old_state->arcs_mtx);
3147 3145 }
3148 3146 mutex_exit(hash_lock);
3149 3147 mutex_exit(&buf->b_evict_lock);
3150 3148
3151 3149 VERIFY(buf->b_efunc(buf) == 0);
3152 3150 buf->b_efunc = NULL;
3153 3151 buf->b_private = NULL;
3154 3152 buf->b_hdr = NULL;
3155 3153 buf->b_next = NULL;
3156 3154 kmem_cache_free(buf_cache, buf);
3157 3155 return (1);
3158 3156 }
3159 3157
3160 3158 /*
3161 3159 * Release this buffer from the cache, making it an anonymous buffer. This
3162 3160 * must be done after a read and prior to modifying the buffer contents.
3163 3161 * If the buffer has more than one reference, we must make
3164 3162 * a new hdr for the buffer.
3165 3163 */
3166 3164 void
3167 3165 arc_release(arc_buf_t *buf, void *tag)
3168 3166 {
3169 3167 arc_buf_hdr_t *hdr;
3170 3168 kmutex_t *hash_lock = NULL;
3171 3169 l2arc_buf_hdr_t *l2hdr;
3172 3170 uint64_t buf_size;
3173 3171
3174 3172 /*
3175 3173 * It would be nice to assert that if it's DMU metadata (level >
3176 3174 * 0 || it's the dnode file), then it must be syncing context.
3177 3175 * But we don't know that information at this level.
3178 3176 */
3179 3177
3180 3178 mutex_enter(&buf->b_evict_lock);
3181 3179 hdr = buf->b_hdr;
3182 3180
3183 3181 /* this buffer is not on any list */
3184 3182 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3185 3183
3186 3184 if (hdr->b_state == arc_anon) {
3187 3185 /* this buffer is already released */
3188 3186 ASSERT(buf->b_efunc == NULL);
3189 3187 } else {
3190 3188 hash_lock = HDR_LOCK(hdr);
3191 3189 mutex_enter(hash_lock);
3192 3190 hdr = buf->b_hdr;
3193 3191 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3194 3192 }
3195 3193
3196 3194 l2hdr = hdr->b_l2hdr;
3197 3195 if (l2hdr) {
3198 3196 mutex_enter(&l2arc_buflist_mtx);
3199 3197 hdr->b_l2hdr = NULL;
3200 3198 }
3201 3199 buf_size = hdr->b_size;
3202 3200
3203 3201 /*
3204 3202 * Do we have more than one buf?
3205 3203 */
3206 3204 if (hdr->b_datacnt > 1) {
3207 3205 arc_buf_hdr_t *nhdr;
3208 3206 arc_buf_t **bufp;
3209 3207 uint64_t blksz = hdr->b_size;
3210 3208 uint64_t spa = hdr->b_spa;
3211 3209 arc_buf_contents_t type = hdr->b_type;
3212 3210 uint32_t flags = hdr->b_flags;
3213 3211
3214 3212 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3215 3213 /*
3216 3214 * Pull the data off this hdr and attach it to
3217 3215 * a new anonymous hdr.
3218 3216 */
3219 3217 (void) remove_reference(hdr, hash_lock, tag);
3220 3218 bufp = &hdr->b_buf;
3221 3219 while (*bufp != buf)
3222 3220 bufp = &(*bufp)->b_next;
3223 3221 *bufp = buf->b_next;
3224 3222 buf->b_next = NULL;
3225 3223
3226 3224 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3227 3225 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3228 3226 if (refcount_is_zero(&hdr->b_refcnt)) {
3229 3227 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3230 3228 ASSERT3U(*size, >=, hdr->b_size);
3231 3229 atomic_add_64(size, -hdr->b_size);
3232 3230 }
3233 3231
3234 3232 /*
3235 3233 * We're releasing a duplicate user data buffer; update
3236 3234 * our statistics accordingly.
3237 3235 */
3238 3236 if (hdr->b_type == ARC_BUFC_DATA) {
3239 3237 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3240 3238 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3241 3239 -hdr->b_size);
3242 3240 }
3243 3241 hdr->b_datacnt -= 1;
3244 3242 arc_cksum_verify(buf);
3245 3243 arc_buf_unwatch(buf);
3246 3244
3247 3245 mutex_exit(hash_lock);
3248 3246
3249 3247 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3250 3248 nhdr->b_size = blksz;
3251 3249 nhdr->b_spa = spa;
3252 3250 nhdr->b_type = type;
3253 3251 nhdr->b_buf = buf;
3254 3252 nhdr->b_state = arc_anon;
3255 3253 nhdr->b_arc_access = 0;
3256 3254 nhdr->b_flags = flags & ARC_L2_WRITING;
3257 3255 nhdr->b_l2hdr = NULL;
3258 3256 nhdr->b_datacnt = 1;
3259 3257 nhdr->b_freeze_cksum = NULL;
3260 3258 (void) refcount_add(&nhdr->b_refcnt, tag);
3261 3259 buf->b_hdr = nhdr;
3262 3260 mutex_exit(&buf->b_evict_lock);
3263 3261 atomic_add_64(&arc_anon->arcs_size, blksz);
3264 3262 } else {
3265 3263 mutex_exit(&buf->b_evict_lock);
3266 3264 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3267 3265 ASSERT(!list_link_active(&hdr->b_arc_node));
3268 3266 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3269 3267 if (hdr->b_state != arc_anon)
3270 3268 arc_change_state(arc_anon, hdr, hash_lock);
3271 3269 hdr->b_arc_access = 0;
3272 3270 if (hash_lock)
3273 3271 mutex_exit(hash_lock);
3274 3272
3275 3273 buf_discard_identity(hdr);
3276 3274 arc_buf_thaw(buf);
3277 3275 }
3278 3276 buf->b_efunc = NULL;
3279 3277 buf->b_private = NULL;
3280 3278
3281 3279 if (l2hdr) {
3282 3280 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3283 3281 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3284 3282 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3285 3283 mutex_exit(&l2arc_buflist_mtx);
3286 3284 }
3287 3285 }
3288 3286
3289 3287 int
3290 3288 arc_released(arc_buf_t *buf)
3291 3289 {
3292 3290 int released;
3293 3291
3294 3292 mutex_enter(&buf->b_evict_lock);
3295 3293 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3296 3294 mutex_exit(&buf->b_evict_lock);
3297 3295 return (released);
3298 3296 }
3299 3297
3300 3298 int
3301 3299 arc_has_callback(arc_buf_t *buf)
3302 3300 {
3303 3301 int callback;
3304 3302
3305 3303 mutex_enter(&buf->b_evict_lock);
3306 3304 callback = (buf->b_efunc != NULL);
3307 3305 mutex_exit(&buf->b_evict_lock);
3308 3306 return (callback);
3309 3307 }
3310 3308
3311 3309 #ifdef ZFS_DEBUG
3312 3310 int
3313 3311 arc_referenced(arc_buf_t *buf)
3314 3312 {
3315 3313 int referenced;
3316 3314
3317 3315 mutex_enter(&buf->b_evict_lock);
3318 3316 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3319 3317 mutex_exit(&buf->b_evict_lock);
3320 3318 return (referenced);
3321 3319 }
3322 3320 #endif
3323 3321
3324 3322 static void
3325 3323 arc_write_ready(zio_t *zio)
3326 3324 {
3327 3325 arc_write_callback_t *callback = zio->io_private;
3328 3326 arc_buf_t *buf = callback->awcb_buf;
3329 3327 arc_buf_hdr_t *hdr = buf->b_hdr;
3330 3328
3331 3329 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3332 3330 callback->awcb_ready(zio, buf, callback->awcb_private);
3333 3331
3334 3332 /*
3335 3333 * If the IO is already in progress, then this is a re-write
3336 3334 * attempt, so we need to thaw and re-compute the cksum.
3337 3335 * It is the responsibility of the callback to handle the
3338 3336 * accounting for any re-write attempt.
3339 3337 */
3340 3338 if (HDR_IO_IN_PROGRESS(hdr)) {
3341 3339 mutex_enter(&hdr->b_freeze_lock);
3342 3340 if (hdr->b_freeze_cksum != NULL) {
3343 3341 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3344 3342 hdr->b_freeze_cksum = NULL;
3345 3343 }
3346 3344 mutex_exit(&hdr->b_freeze_lock);
3347 3345 }
3348 3346 arc_cksum_compute(buf, B_FALSE);
3349 3347 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3350 3348 }
3351 3349
3352 3350 static void
3353 3351 arc_write_done(zio_t *zio)
3354 3352 {
3355 3353 arc_write_callback_t *callback = zio->io_private;
3356 3354 arc_buf_t *buf = callback->awcb_buf;
3357 3355 arc_buf_hdr_t *hdr = buf->b_hdr;
3358 3356
3359 3357 ASSERT(hdr->b_acb == NULL);
3360 3358
3361 3359 if (zio->io_error == 0) {
3362 3360 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3363 3361 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3364 3362 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3365 3363 } else {
3366 3364 ASSERT(BUF_EMPTY(hdr));
3367 3365 }
3368 3366
3369 3367 /*
3370 3368 * If the block to be written was all-zero, we may have
3371 3369 * compressed it away. In this case no write was performed
3372 3370 * so there will be no dva/birth/checksum. The buffer must
3373 3371 * therefore remain anonymous (and uncached).
3374 3372 */
3375 3373 if (!BUF_EMPTY(hdr)) {
3376 3374 arc_buf_hdr_t *exists;
3377 3375 kmutex_t *hash_lock;
3378 3376
3379 3377 ASSERT(zio->io_error == 0);
3380 3378
3381 3379 arc_cksum_verify(buf);
3382 3380
3383 3381 exists = buf_hash_insert(hdr, &hash_lock);
3384 3382 if (exists) {
3385 3383 /*
3386 3384 * This can only happen if we overwrite for
3387 3385 * sync-to-convergence, because we remove
3388 3386 * buffers from the hash table when we arc_free().
3389 3387 */
3390 3388 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3391 3389 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3392 3390 panic("bad overwrite, hdr=%p exists=%p",
3393 3391 (void *)hdr, (void *)exists);
3394 3392 ASSERT(refcount_is_zero(&exists->b_refcnt));
3395 3393 arc_change_state(arc_anon, exists, hash_lock);
3396 3394 mutex_exit(hash_lock);
3397 3395 arc_hdr_destroy(exists);
3398 3396 exists = buf_hash_insert(hdr, &hash_lock);
3399 3397 ASSERT3P(exists, ==, NULL);
3400 3398 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3401 3399 /* nopwrite */
3402 3400 ASSERT(zio->io_prop.zp_nopwrite);
3403 3401 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3404 3402 panic("bad nopwrite, hdr=%p exists=%p",
3405 3403 (void *)hdr, (void *)exists);
3406 3404 } else {
3407 3405 /* Dedup */
3408 3406 ASSERT(hdr->b_datacnt == 1);
3409 3407 ASSERT(hdr->b_state == arc_anon);
3410 3408 ASSERT(BP_GET_DEDUP(zio->io_bp));
3411 3409 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3412 3410 }
3413 3411 }
3414 3412 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3415 3413 /* if it's not anon, we are doing a scrub */
3416 3414 if (!exists && hdr->b_state == arc_anon)
3417 3415 arc_access(hdr, hash_lock);
3418 3416 mutex_exit(hash_lock);
3419 3417 } else {
3420 3418 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3421 3419 }
3422 3420
3423 3421 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3424 3422 callback->awcb_done(zio, buf, callback->awcb_private);
3425 3423
3426 3424 kmem_free(callback, sizeof (arc_write_callback_t));
3427 3425 }
3428 3426
3429 3427 zio_t *
3430 3428 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3431 3429 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3432 3430 arc_done_func_t *ready, arc_done_func_t *done, void *private,
3433 3431 int priority, int zio_flags, const zbookmark_t *zb)
3434 3432 {
3435 3433 arc_buf_hdr_t *hdr = buf->b_hdr;
3436 3434 arc_write_callback_t *callback;
3437 3435 zio_t *zio;
3438 3436
3439 3437 ASSERT(ready != NULL);
3440 3438 ASSERT(done != NULL);
3441 3439 ASSERT(!HDR_IO_ERROR(hdr));
3442 3440 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3443 3441 ASSERT(hdr->b_acb == NULL);
3444 3442 if (l2arc)
3445 3443 hdr->b_flags |= ARC_L2CACHE;
3446 3444 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3447 3445 callback->awcb_ready = ready;
3448 3446 callback->awcb_done = done;
3449 3447 callback->awcb_private = private;
3450 3448 callback->awcb_buf = buf;
3451 3449
3452 3450 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3453 3451 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3454 3452
3455 3453 return (zio);
3456 3454 }
3457 3455
3458 3456 static int
3459 3457 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3460 3458 {
3461 3459 #ifdef _KERNEL
3462 3460 uint64_t available_memory = ptob(freemem);
3463 3461 static uint64_t page_load = 0;
3464 3462 static uint64_t last_txg = 0;
3465 3463
3466 3464 #if defined(__i386)
3467 3465 available_memory =
3468 3466 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3469 3467 #endif
3470 3468 if (available_memory >= zfs_write_limit_max)
3471 3469 return (0);
3472 3470
3473 3471 if (txg > last_txg) {
3474 3472 last_txg = txg;
3475 3473 page_load = 0;
3476 3474 }
3477 3475 /*
3478 3476 * If we are in pageout, we know that memory is already tight
3479 3477 * and the ARC is already going to be evicting, so we just want
3480 3478 * to continue to let page writes occur as quickly as possible.
3481 3479 */
3482 3480 if (curproc == proc_pageout) {
3483 3481 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3484 3482 return (SET_ERROR(ERESTART));
3485 3483 /* Note: reserve is inflated, so we deflate */
3486 3484 page_load += reserve / 8;
3487 3485 return (0);
3488 3486 } else if (page_load > 0 && arc_reclaim_needed()) {
3489 3487 /* memory is low, delay before restarting */
3490 3488 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3491 3489 return (SET_ERROR(EAGAIN));
3492 3490 }
3493 3491 page_load = 0;
3494 3492
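	/*
	 * Credit back memory that the ARC itself could surrender: the
	 * evictable (unreferenced) data and metadata on the MRU and MFU
	 * lists, capped at how far arc_size may shrink toward arc_c_min.
	 */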
3495 3493 if (arc_size > arc_c_min) {
3496 3494 uint64_t evictable_memory =
3497 3495 arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3498 3496 arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3499 3497 arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3500 3498 arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3501 3499 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3502 3500 }
3503 3501
3504 3502 if (inflight_data > available_memory / 4) {
3505 3503 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3506 3504 return (SET_ERROR(ERESTART));
3507 3505 }
3508 3506 #endif
3509 3507 return (0);
3510 3508 }
3511 3509
3512 3510 void
3513 3511 arc_tempreserve_clear(uint64_t reserve)
3514 3512 {
3515 3513 atomic_add_64(&arc_tempreserve, -reserve);
3516 3514 ASSERT((int64_t)arc_tempreserve >= 0);
3517 3515 }
3518 3516
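/*
 * A sketch of the intended calling protocol (the real consumers live in
 * the DSL layer; the shape below is illustrative, not a verbatim caller):
 * reserve space before dirtying data in a txg, back off and retry on
 * ERESTART/EAGAIN, and clear the reservation once the dirty data has
 * been accounted for.
 *
 *	if ((error = arc_tempreserve_space(size, txg)) != 0)
 *		return (error);
 *	... dirty up to 'size' bytes in this txg ...
 *	arc_tempreserve_clear(size);
 */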
3519 3517 int
3520 3518 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3521 3519 {
3522 3520 int error;
3523 3521 uint64_t anon_size;
3524 3522
3525 3523 #ifdef ZFS_DEBUG
3526 3524 /*
3527 3525 * Once in a while, fail for no reason. Everything should cope.
3528 3526 */
3529 3527 if (spa_get_random(10000) == 0) {
3530 3528 dprintf("forcing random failure\n");
3531 3529 return (SET_ERROR(ERESTART));
3532 3530 }
3533 3531 #endif
3534 3532 if (reserve > arc_c/4 && !arc_no_grow)
3535 3533 arc_c = MIN(arc_c_max, reserve * 4);
3536 3534 if (reserve > arc_c)
3537 3535 return (SET_ERROR(ENOMEM));
3538 3536
3539 3537 /*
3540 3538 * Don't count loaned bufs as in flight dirty data to prevent long
3541 3539 * network delays from blocking transactions that are ready to be
3542 3540 * assigned to a txg.
3543 3541 */
3544 3542 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3545 3543
3546 3544 /*
3547 3545 * Writes will, almost always, require additional memory allocations
3548 - * in order to compress/encrypt/etc the data. We therefor need to
3546 + * in order to compress/encrypt/etc the data. We therefore need to
3549 3547 * make sure that there is sufficient available memory for this.
3550 3548 */
3551 3549 if (error = arc_memory_throttle(reserve, anon_size, txg))
3552 3550 return (error);
3553 3551
3554 3552 /*
3555 3553 * Throttle writes when the amount of dirty data in the cache
3556 3554 * gets too large. We try to keep the cache less than half full
3557 3555 * of dirty blocks so that our sync times don't grow too large.
3558 3556 * Note: if two requests come in concurrently, we might let them
3559 3557 * both succeed, when one of them should fail. Not a huge deal.
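 * As a worked example (numbers illustrative only): with arc_c = 1GB,
 * a reservation is refused with ERESTART once reserve + arc_tempreserve
 * + anon_size would exceed 512MB while anon_size alone exceeds 256MB.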
3560 3558 */
3561 3559
3562 3560 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3563 3561 anon_size > arc_c / 4) {
3564 3562 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3565 3563 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3566 3564 arc_tempreserve>>10,
3567 3565 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3568 3566 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3569 3567 reserve>>10, arc_c>>10);
3570 3568 return (SET_ERROR(ERESTART));
3571 3569 }
3572 3570 atomic_add_64(&arc_tempreserve, reserve);
3573 3571 return (0);
3574 3572 }
3575 3573
3576 3574 void
3577 3575 arc_init(void)
3578 3576 {
3579 3577 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3580 3578 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3581 3579
3582 3580 /* Convert seconds to clock ticks */
3583 3581 arc_min_prefetch_lifespan = 1 * hz;
3584 3582
3585 3583 /* Start out with 1/8 of all memory */
3586 3584 arc_c = physmem * PAGESIZE / 8;
3587 3585
3588 3586 #ifdef _KERNEL
3589 3587 /*
3590 3588 * On architectures where the physical memory can be larger
3591 3589 * than the addressable space (intel in 32-bit mode), we may
3592 3590 * need to limit the cache to 1/8 of VM size.
3593 3591 */
3594 3592 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3595 3593 #endif
3596 3594
3597 3595 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3598 3596 arc_c_min = MAX(arc_c / 4, 64<<20);
3599 3597 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3600 3598 if (arc_c * 8 >= 1<<30)
3601 3599 arc_c_max = (arc_c * 8) - (1<<30);
3602 3600 else
3603 3601 arc_c_max = arc_c_min;
3604 3602 arc_c_max = MAX(arc_c * 6, arc_c_max);
3605 3603
3606 3604 /*
3607 3605 * Allow the tunables to override our calculations if they are
3608 3606 * reasonable (i.e. over 64MB)
3609 3607 */
3610 3608 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3611 3609 arc_c_max = zfs_arc_max;
3612 3610 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3613 3611 arc_c_min = zfs_arc_min;
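	/*
	 * For example, an administrator could cap the ARC at 4GB with an
	 * /etc/system entry such as the following (a sketch; the value is
	 * purely illustrative):
	 *
	 *	set zfs:zfs_arc_max = 0x100000000
	 */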
3614 3612
3615 3613 arc_c = arc_c_max;
3616 3614 arc_p = (arc_c >> 1);
3617 3615
3618 3616 /* limit meta-data to 1/4 of the arc capacity */
3619 3617 arc_meta_limit = arc_c_max / 4;
3620 3618
3621 3619 /* Allow the tunable to override if it is reasonable */
3622 3620 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3623 3621 arc_meta_limit = zfs_arc_meta_limit;
3624 3622
3625 3623 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3626 3624 arc_c_min = arc_meta_limit / 2;
3627 3625
3628 3626 if (zfs_arc_grow_retry > 0)
3629 3627 arc_grow_retry = zfs_arc_grow_retry;
3630 3628
3631 3629 if (zfs_arc_shrink_shift > 0)
3632 3630 arc_shrink_shift = zfs_arc_shrink_shift;
3633 3631
3634 3632 if (zfs_arc_p_min_shift > 0)
3635 3633 arc_p_min_shift = zfs_arc_p_min_shift;
3636 3634
3637 3635 /* if kmem_flags are set, let's try to use less memory */
3638 3636 if (kmem_debugging())
3639 3637 arc_c = arc_c / 2;
3640 3638 if (arc_c < arc_c_min)
3641 3639 arc_c = arc_c_min;
3642 3640
3643 3641 arc_anon = &ARC_anon;
3644 3642 arc_mru = &ARC_mru;
3645 3643 arc_mru_ghost = &ARC_mru_ghost;
3646 3644 arc_mfu = &ARC_mfu;
3647 3645 arc_mfu_ghost = &ARC_mfu_ghost;
3648 3646 arc_l2c_only = &ARC_l2c_only;
3649 3647 arc_size = 0;
3650 3648
3651 3649 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3652 3650 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3653 3651 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3654 3652 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3655 3653 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3656 3654 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3657 3655
3658 3656 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3659 3657 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3660 3658 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3661 3659 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3662 3660 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3663 3661 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3664 3662 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3665 3663 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3666 3664 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3667 3665 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3668 3666 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3669 3667 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3670 3668 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3671 3669 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3672 3670 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3673 3671 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3674 3672 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3675 3673 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3676 3674 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3677 3675 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3678 3676
3679 3677 buf_init();
3680 3678
3681 3679 arc_thread_exit = 0;
3682 3680 arc_eviction_list = NULL;
3683 3681 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3684 3682 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3685 3683
3686 3684 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3687 3685 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3688 3686
3689 3687 if (arc_ksp != NULL) {
3690 3688 arc_ksp->ks_data = &arc_stats;
3691 3689 kstat_install(arc_ksp);
3692 3690 }
3693 3691
3694 3692 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3695 3693 TS_RUN, minclsyspri);
3696 3694
3697 3695 arc_dead = FALSE;
3698 3696 arc_warm = B_FALSE;
3699 3697
3700 3698 if (zfs_write_limit_max == 0)
3701 3699 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3702 3700 else
3703 3701 zfs_write_limit_shift = 0;
3704 3702 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3705 3703 }
3706 3704
3707 3705 void
3708 3706 arc_fini(void)
3709 3707 {
3710 3708 mutex_enter(&arc_reclaim_thr_lock);
3711 3709 arc_thread_exit = 1;
3712 3710 while (arc_thread_exit != 0)
3713 3711 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3714 3712 mutex_exit(&arc_reclaim_thr_lock);
3715 3713
3716 3714 arc_flush(NULL);
3717 3715
3718 3716 arc_dead = TRUE;
3719 3717
3720 3718 if (arc_ksp != NULL) {
3721 3719 kstat_delete(arc_ksp);
3722 3720 arc_ksp = NULL;
3723 3721 }
3724 3722
3725 3723 mutex_destroy(&arc_eviction_mtx);
3726 3724 mutex_destroy(&arc_reclaim_thr_lock);
3727 3725 cv_destroy(&arc_reclaim_thr_cv);
3728 3726
3729 3727 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3730 3728 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3731 3729 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3732 3730 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3733 3731 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3734 3732 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3735 3733 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3736 3734 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3737 3735
3738 3736 mutex_destroy(&arc_anon->arcs_mtx);
3739 3737 mutex_destroy(&arc_mru->arcs_mtx);
3740 3738 mutex_destroy(&arc_mru_ghost->arcs_mtx);
3741 3739 mutex_destroy(&arc_mfu->arcs_mtx);
3742 3740 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3743 3741 mutex_destroy(&arc_l2c_only->arcs_mtx);
3744 3742
3745 3743 mutex_destroy(&zfs_write_limit_lock);
3746 3744
3747 3745 buf_fini();
3748 3746
3749 3747 ASSERT(arc_loaned_bytes == 0);
3750 3748 }
3751 3749
3752 3750 /*
3753 3751 * Level 2 ARC
3754 3752 *
3755 3753 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3756 3754 * It uses dedicated storage devices to hold cached data, which are populated
3757 3755 * using large infrequent writes. The main role of this cache is to boost
3758 3756 * the performance of random read workloads. The intended L2ARC devices
3759 3757 * include short-stroked disks, solid state disks, and other media with
3760 3758 * substantially faster read latency than disk.
3761 3759 *
3762 3760 * +-----------------------+
3763 3761 * | ARC |
3764 3762 * +-----------------------+
3765 3763 * | ^ ^
3766 3764 * | | |
3767 3765 * l2arc_feed_thread() arc_read()
3768 3766 * | | |
3769 3767 * | l2arc read |
3770 3768 * V | |
3771 3769 * +---------------+ |
3772 3770 * | L2ARC | |
3773 3771 * +---------------+ |
3774 3772 * | ^ |
3775 3773 * l2arc_write() | |
3776 3774 * | | |
3777 3775 * V | |
3778 3776 * +-------+ +-------+
3779 3777 * | vdev | | vdev |
3780 3778 * | cache | | cache |
3781 3779 * +-------+ +-------+
3782 3780 * +=========+ .-----.
3783 3781 * : L2ARC : |-_____-|
3784 3782 * : devices : | Disks |
3785 3783 * +=========+ `-_____-'
3786 3784 *
3787 3785 * Read requests are satisfied from the following sources, in order:
3788 3786 *
3789 3787 * 1) ARC
3790 3788 * 2) vdev cache of L2ARC devices
3791 3789 * 3) L2ARC devices
3792 3790 * 4) vdev cache of disks
3793 3791 * 5) disks
3794 3792 *
3795 3793 * Some L2ARC device types exhibit extremely slow write performance.
3796 3794 * To accommodate this, there are some significant differences between
3797 3795 * the L2ARC and traditional cache design:
3798 3796 *
3799 3797 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3800 3798 * the ARC behave as usual, freeing buffers and placing headers on ghost
3801 3799 * lists. The ARC does not send buffers to the L2ARC during eviction as
3802 3800 * doing so would inflate write latencies for all ARC memory pressure.
3803 3801 *
3804 3802 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3805 3803 * It does this by periodically scanning buffers from the eviction-end of
3806 3804 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3807 3805 * not already there. It scans until a headroom of buffers is satisfied,
3808 3806 * which itself is a buffer for ARC eviction. The thread that does this is
3809 3807 * l2arc_feed_thread(), illustrated below; example sizes are included to
3810 3808 * provide a better sense of ratio than this diagram:
3811 3809 *
3812 3810 * head --> tail
3813 3811 * +---------------------+----------+
3814 3812 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3815 3813 * +---------------------+----------+ | o L2ARC eligible
3816 3814 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3817 3815 * +---------------------+----------+ |
3818 3816 * 15.9 Gbytes ^ 32 Mbytes |
3819 3817 * headroom |
3820 3818 * l2arc_feed_thread()
3821 3819 * |
3822 3820 * l2arc write hand <--[oooo]--'
3823 3821 * | 8 Mbyte
3824 3822 * | write max
3825 3823 * V
3826 3824 * +==============================+
3827 3825 * L2ARC dev |####|#|###|###| |####| ... |
3828 3826 * +==============================+
3829 3827 * 32 Gbytes
3830 3828 *
3831 3829 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3832 3830 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3833 3831 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3834 3832 * safe to say that this is an uncommon case, since buffers at the end of
3835 3833 * the ARC lists have moved there due to inactivity.
3836 3834 *
3837 3835 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3838 3836 * then the L2ARC simply misses copying some buffers. This serves as a
3839 3837 * pressure valve to prevent heavy read workloads from both stalling the ARC
3840 3838 * with waits and clogging the L2ARC with writes. This also helps prevent
3841 3839 * the potential for the L2ARC to churn if it attempts to cache content too
3842 3840 * quickly, such as during backups of the entire pool.
3843 3841 *
3844 3842 * 5. After system boot and before the ARC has filled main memory, there are
3845 3843 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3846 3844 * lists can remain mostly static. Instead of searching from the tail of these
3847 3845 * lists as pictured, the l2arc_feed_thread() will search from the list heads
3848 3846 * for eligible buffers, greatly increasing its chance of finding them.
3849 3847 *
3850 3848 * The L2ARC device write speed is also boosted during this time so that
3851 3849 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3852 3850 * there are no L2ARC reads, and no fear of degrading read performance
3853 3851 * through increased writes.
3854 3852 *
3855 3853 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3856 3854 * the vdev queue can aggregate them into larger and fewer writes. Each
3857 3855 * device is written to in a rotor fashion, sweeping writes through
3858 3856 * available space then repeating.
3859 3857 *
3860 3858 * 7. The L2ARC does not store dirty content. It never needs to flush
3861 3859 * write buffers back to disk-based storage.
3862 3860 *
3863 3861 * 8. If an ARC buffer is written (and dirtied) which also exists in the
3864 3862 * L2ARC, the now stale L2ARC buffer is immediately dropped.
3865 3863 *
3866 3864 * The performance of the L2ARC can be tweaked by a number of tunables, which
3867 3865 * may be necessary for different workloads:
3868 3866 *
3869 3867 * l2arc_write_max max write bytes per interval
3870 3868 * l2arc_write_boost extra write bytes during device warmup
3871 3869 * l2arc_noprefetch skip caching prefetched buffers
3872 3870 * l2arc_headroom number of max device writes to precache
3873 3871 * l2arc_feed_secs seconds between L2ARC writing
3874 3872 *
3875 3873 * Tunables may be removed or added as future performance improvements are
3876 3874 * integrated, and also may become zpool properties.
3877 3875 *
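 * As a sketch of how these might be adjusted on illumos (values are
 * illustrative only), an administrator tuning for a streaming backup
 * workload could add to /etc/system:
 *
 *	set zfs:l2arc_noprefetch = 0
 *	set zfs:l2arc_write_max = 0x4000000
 *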
3878 3876 * There are three key functions that control how the L2ARC warms up:
3879 3877 *
3880 3878 * l2arc_write_eligible() check if a buffer is eligible to cache
3881 3879 * l2arc_write_size() calculate how much to write
3882 3880 * l2arc_write_interval() calculate sleep delay between writes
3883 3881 *
3884 3882 * These three functions determine what to write, how much, and how quickly
3885 3883 * to send writes.
3886 3884 */
3887 3885
3888 3886 static boolean_t
3889 3887 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3890 3888 {
3891 3889 /*
3892 3890 * A buffer is *not* eligible for the L2ARC if it:
3893 3891 * 1. belongs to a different spa.
3894 3892 * 2. is already cached on the L2ARC.
3895 3893 * 3. has an I/O in progress (it may be an incomplete read).
3896 3894 * 4. is flagged not eligible (zfs property).
3897 3895 */
3898 3896 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
3899 3897 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3900 3898 return (B_FALSE);
3901 3899
3902 3900 return (B_TRUE);
3903 3901 }
3904 3902
3905 3903 static uint64_t
3906 3904 l2arc_write_size(l2arc_dev_t *dev)
3907 3905 {
3908 3906 uint64_t size;
3909 3907
3910 3908 size = dev->l2ad_write;
3911 3909
3912 3910 if (arc_warm == B_FALSE)
3913 3911 size += dev->l2ad_boost;
3914 3912
3915 3913 return (size);
3917 3915 }
3918 3916
3919 3917 static clock_t
3920 3918 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
3921 3919 {
3922 3920 clock_t interval, next, now;
3923 3921
3924 3922 /*
3925 3923 * If the ARC lists are busy, increase our write rate; if the
3926 3924 * lists are stale, idle back. This is achieved by checking
3927 3925 * how much we previously wrote - if it was more than half of
3928 3926 * what we wanted, schedule the next write much sooner.
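 * For example, with the default tunables (l2arc_feed_secs = 1,
 * l2arc_feed_min_ms = 200), a busy pass schedules the next write
 * roughly 200ms out instead of a full second.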
3929 3927 */
3930 3928 if (l2arc_feed_again && wrote > (wanted / 2))
3931 3929 interval = (hz * l2arc_feed_min_ms) / 1000;
3932 3930 else
3933 3931 interval = hz * l2arc_feed_secs;
3934 3932
3935 3933 now = ddi_get_lbolt();
3936 3934 next = MAX(now, MIN(now + interval, began + interval));
3937 3935
3938 3936 return (next);
3939 3937 }
3940 3938
3941 3939 static void
3942 3940 l2arc_hdr_stat_add(void)
3943 3941 {
3944 3942 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3945 3943 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3946 3944 }
3947 3945
3948 3946 static void
3949 3947 l2arc_hdr_stat_remove(void)
3950 3948 {
3951 3949 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3952 3950 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3953 3951 }
3954 3952
3955 3953 /*
3956 3954 * Cycle through L2ARC devices. This is how L2ARC load balances.
3957 3955 * If a device is returned, this also returns holding the spa config lock.
3958 3956 */
3959 3957 static l2arc_dev_t *
3960 3958 l2arc_dev_get_next(void)
3961 3959 {
3962 3960 l2arc_dev_t *first, *next = NULL;
3963 3961
3964 3962 /*
3965 3963 * Lock out the removal of spas (spa_namespace_lock), then removal
3966 3964 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
3967 3965 * both locks will be dropped and a spa config lock held instead.
3968 3966 */
3969 3967 mutex_enter(&spa_namespace_lock);
3970 3968 mutex_enter(&l2arc_dev_mtx);
3971 3969
3972 3970 /* if there are no vdevs, there is nothing to do */
3973 3971 if (l2arc_ndev == 0)
3974 3972 goto out;
3975 3973
3976 3974 first = NULL;
3977 3975 next = l2arc_dev_last;
3978 3976 do {
3979 3977 /* loop around the list looking for a non-faulted vdev */
3980 3978 if (next == NULL) {
3981 3979 next = list_head(l2arc_dev_list);
3982 3980 } else {
3983 3981 next = list_next(l2arc_dev_list, next);
3984 3982 if (next == NULL)
3985 3983 next = list_head(l2arc_dev_list);
3986 3984 }
3987 3985
3988 3986 /* if we have come back to the start, bail out */
3989 3987 if (first == NULL)
3990 3988 first = next;
3991 3989 else if (next == first)
3992 3990 break;
3993 3991
3994 3992 } while (vdev_is_dead(next->l2ad_vdev));
3995 3993
3996 3994 /* if we were unable to find any usable vdevs, return NULL */
3997 3995 if (vdev_is_dead(next->l2ad_vdev))
3998 3996 next = NULL;
3999 3997
4000 3998 l2arc_dev_last = next;
4001 3999
4002 4000 out:
4003 4001 mutex_exit(&l2arc_dev_mtx);
4004 4002
4005 4003 /*
4006 4004 * Grab the config lock to prevent the 'next' device from being
4007 4005 * removed while we are writing to it.
4008 4006 */
4009 4007 if (next != NULL)
4010 4008 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4011 4009 mutex_exit(&spa_namespace_lock);
4012 4010
4013 4011 return (next);
4014 4012 }
4015 4013
4016 4014 /*
4017 4015 * Free buffers that were tagged for destruction.
4018 4016 */
4019 4017 static void
4020 4018 l2arc_do_free_on_write(void)
4021 4019 {
4022 4020 list_t *buflist;
4023 4021 l2arc_data_free_t *df, *df_prev;
4024 4022
4025 4023 mutex_enter(&l2arc_free_on_write_mtx);
4026 4024 buflist = l2arc_free_on_write;
4027 4025
4028 4026 for (df = list_tail(buflist); df; df = df_prev) {
4029 4027 df_prev = list_prev(buflist, df);
4030 4028 ASSERT(df->l2df_data != NULL);
4031 4029 ASSERT(df->l2df_func != NULL);
4032 4030 df->l2df_func(df->l2df_data, df->l2df_size);
4033 4031 list_remove(buflist, df);
4034 4032 kmem_free(df, sizeof (l2arc_data_free_t));
4035 4033 }
4036 4034
4037 4035 mutex_exit(&l2arc_free_on_write_mtx);
4038 4036 }
4039 4037
4040 4038 /*
4041 4039 * A write to a cache device has completed. Update all headers to allow
4042 4040 * reads from these buffers to begin.
4043 4041 */
4044 4042 static void
4045 4043 l2arc_write_done(zio_t *zio)
4046 4044 {
4047 4045 l2arc_write_callback_t *cb;
4048 4046 l2arc_dev_t *dev;
4049 4047 list_t *buflist;
4050 4048 arc_buf_hdr_t *head, *ab, *ab_prev;
4051 4049 l2arc_buf_hdr_t *abl2;
4052 4050 kmutex_t *hash_lock;
4053 4051
4054 4052 cb = zio->io_private;
4055 4053 ASSERT(cb != NULL);
4056 4054 dev = cb->l2wcb_dev;
4057 4055 ASSERT(dev != NULL);
4058 4056 head = cb->l2wcb_head;
4059 4057 ASSERT(head != NULL);
4060 4058 buflist = dev->l2ad_buflist;
4061 4059 ASSERT(buflist != NULL);
4062 4060 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4063 4061 l2arc_write_callback_t *, cb);
4064 4062
4065 4063 if (zio->io_error != 0)
4066 4064 ARCSTAT_BUMP(arcstat_l2_writes_error);
4067 4065
4068 4066 mutex_enter(&l2arc_buflist_mtx);
4069 4067
4070 4068 /*
4071 4069 * All writes completed, or an error was hit.
4072 4070 */
4073 4071 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4074 4072 ab_prev = list_prev(buflist, ab);
4075 4073
4076 4074 hash_lock = HDR_LOCK(ab);
4077 4075 if (!mutex_tryenter(hash_lock)) {
4078 4076 /*
4079 4077 * This buffer misses out. It may be in a stage
4080 4078 * of eviction. Its ARC_L2_WRITING flag will be
4081 4079 * left set, denying reads to this buffer.
4082 4080 */
4083 4081 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4084 4082 continue;
4085 4083 }
4086 4084
4087 4085 if (zio->io_error != 0) {
4088 4086 /*
4089 4087 * Error - drop L2ARC entry.
4090 4088 */
4091 4089 list_remove(buflist, ab);
4092 4090 abl2 = ab->b_l2hdr;
4093 4091 ab->b_l2hdr = NULL;
4094 4092 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4095 4093 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4096 4094 }
4097 4095
4098 4096 /*
4099 4097 * Allow ARC to begin reads to this L2ARC entry.
4100 4098 */
4101 4099 ab->b_flags &= ~ARC_L2_WRITING;
4102 4100
4103 4101 mutex_exit(hash_lock);
4104 4102 }
4105 4103
4106 4104 atomic_inc_64(&l2arc_writes_done);
4107 4105 list_remove(buflist, head);
4108 4106 kmem_cache_free(hdr_cache, head);
4109 4107 mutex_exit(&l2arc_buflist_mtx);
4110 4108
4111 4109 l2arc_do_free_on_write();
4112 4110
4113 4111 kmem_free(cb, sizeof (l2arc_write_callback_t));
4114 4112 }
4115 4113
4116 4114 /*
4117 4115 * A read to a cache device has completed. Validate buffer contents before
4118 4116 * handing over to the regular ARC routines.
4119 4117 */
4120 4118 static void
4121 4119 l2arc_read_done(zio_t *zio)
4122 4120 {
4123 4121 l2arc_read_callback_t *cb;
4124 4122 arc_buf_hdr_t *hdr;
4125 4123 arc_buf_t *buf;
4126 4124 kmutex_t *hash_lock;
4127 4125 int equal;
4128 4126
4129 4127 ASSERT(zio->io_vd != NULL);
4130 4128 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4131 4129
4132 4130 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4133 4131
4134 4132 cb = zio->io_private;
4135 4133 ASSERT(cb != NULL);
4136 4134 buf = cb->l2rcb_buf;
4137 4135 ASSERT(buf != NULL);
4138 4136
4139 4137 hash_lock = HDR_LOCK(buf->b_hdr);
4140 4138 mutex_enter(hash_lock);
4141 4139 hdr = buf->b_hdr;
4142 4140 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4143 4141
4144 4142 /*
4145 4143 * Check this survived the L2ARC journey.
4146 4144 */
4147 4145 equal = arc_cksum_equal(buf);
4148 4146 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4149 4147 mutex_exit(hash_lock);
4150 4148 zio->io_private = buf;
4151 4149 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4152 4150 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4153 4151 arc_read_done(zio);
4154 4152 } else {
4155 4153 mutex_exit(hash_lock);
4156 4154 /*
4157 4155 * Buffer didn't survive caching. Increment stats and
4158 4156 * reissue to the original storage device.
4159 4157 */
4160 4158 if (zio->io_error != 0) {
4161 4159 ARCSTAT_BUMP(arcstat_l2_io_error);
4162 4160 } else {
4163 4161 zio->io_error = SET_ERROR(EIO);
4164 4162 }
4165 4163 if (!equal)
4166 4164 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4167 4165
4168 4166 /*
4169 4167 * If there's no waiter, issue an async i/o to the primary
4170 4168 * storage now. If there *is* a waiter, the caller must
4171 4169 * issue the i/o in a context where it's OK to block.
4172 4170 */
4173 4171 if (zio->io_waiter == NULL) {
4174 4172 zio_t *pio = zio_unique_parent(zio);
4175 4173
4176 4174 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4177 4175
4178 4176 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4179 4177 buf->b_data, zio->io_size, arc_read_done, buf,
4180 4178 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4181 4179 }
4182 4180 }
4183 4181
4184 4182 kmem_free(cb, sizeof (l2arc_read_callback_t));
4185 4183 }
4186 4184
4187 4185 /*
4188 4186 * This is the list priority from which the L2ARC will search for pages to
4189 4187 * cache. This is used within loops (0..3) to cycle through lists in the
4190 4188 * desired order. This order can have a significant effect on cache
4191 4189 * performance.
4192 4190 *
4193 4191 * Currently the metadata lists are hit first, MFU then MRU, followed by
4194 4192 * the data lists. This function returns a locked list, and also returns
4195 4193 * the lock pointer.
4196 4194 */
4197 4195 static list_t *
4198 4196 l2arc_list_locked(int list_num, kmutex_t **lock)
4199 4197 {
4200 4198 list_t *list = NULL;
4201 4199
4202 4200 ASSERT(list_num >= 0 && list_num <= 3);
4203 4201
4204 4202 switch (list_num) {
4205 4203 case 0:
4206 4204 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4207 4205 *lock = &arc_mfu->arcs_mtx;
4208 4206 break;
4209 4207 case 1:
4210 4208 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4211 4209 *lock = &arc_mru->arcs_mtx;
4212 4210 break;
4213 4211 case 2:
4214 4212 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4215 4213 *lock = &arc_mfu->arcs_mtx;
4216 4214 break;
4217 4215 case 3:
4218 4216 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4219 4217 *lock = &arc_mru->arcs_mtx;
4220 4218 break;
4221 4219 }
4222 4220
4223 4221 ASSERT(!(MUTEX_HELD(*lock)));
4224 4222 mutex_enter(*lock);
4225 4223 return (list);
4226 4224 }
4227 4225
4228 4226 /*
4229 4227 * Evict buffers from the device write hand to the distance specified in
4230 4228 * bytes. This distance may span populated buffers, or it may span nothing.
4231 4229 * This is clearing a region on the L2ARC device ready for writing.
4232 4230 * If the 'all' boolean is set, every buffer is evicted.
4233 4231 */
4234 4232 static void
4235 4233 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4236 4234 {
4237 4235 list_t *buflist;
4238 4236 l2arc_buf_hdr_t *abl2;
4239 4237 arc_buf_hdr_t *ab, *ab_prev;
4240 4238 kmutex_t *hash_lock;
4241 4239 uint64_t taddr;
4242 4240
4243 4241 buflist = dev->l2ad_buflist;
4244 4242
4245 4243 if (buflist == NULL)
4246 4244 return;
4247 4245
4248 4246 if (!all && dev->l2ad_first) {
4249 4247 /*
4250 4248 * This is the first sweep through the device. There is
4251 4249 * nothing to evict.
4252 4250 */
4253 4251 return;
4254 4252 }
4255 4253
4256 4254 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4257 4255 /*
4258 4256 * When nearing the end of the device, evict to the end
4259 4257 * before the device write hand jumps to the start.
4260 4258 */
4261 4259 taddr = dev->l2ad_end;
4262 4260 } else {
4263 4261 taddr = dev->l2ad_hand + distance;
4264 4262 }
4265 4263 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4266 4264 uint64_t, taddr, boolean_t, all);
4267 4265
4268 4266 top:
4269 4267 mutex_enter(&l2arc_buflist_mtx);
4270 4268 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4271 4269 ab_prev = list_prev(buflist, ab);
4272 4270
4273 4271 hash_lock = HDR_LOCK(ab);
4274 4272 if (!mutex_tryenter(hash_lock)) {
4275 4273 /*
4276 4274 * Missed the hash lock. Retry.
4277 4275 */
4278 4276 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4279 4277 mutex_exit(&l2arc_buflist_mtx);
4280 4278 mutex_enter(hash_lock);
4281 4279 mutex_exit(hash_lock);
4282 4280 goto top;
4283 4281 }
4284 4282
4285 4283 if (HDR_L2_WRITE_HEAD(ab)) {
4286 4284 /*
4287 4285 * We hit a write head node. Leave it for
4288 4286 * l2arc_write_done().
4289 4287 */
4290 4288 list_remove(buflist, ab);
4291 4289 mutex_exit(hash_lock);
4292 4290 continue;
4293 4291 }
4294 4292
4295 4293 if (!all && ab->b_l2hdr != NULL &&
4296 4294 (ab->b_l2hdr->b_daddr > taddr ||
4297 4295 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4298 4296 /*
4299 4297 * We've evicted to the target address,
4300 4298 * or the end of the device.
4301 4299 */
4302 4300 mutex_exit(hash_lock);
4303 4301 break;
4304 4302 }
4305 4303
4306 4304 if (HDR_FREE_IN_PROGRESS(ab)) {
4307 4305 /*
4308 4306 * Already on the path to destruction.
4309 4307 */
4310 4308 mutex_exit(hash_lock);
4311 4309 continue;
4312 4310 }
4313 4311
4314 4312 if (ab->b_state == arc_l2c_only) {
4315 4313 ASSERT(!HDR_L2_READING(ab));
4316 4314 /*
4317 4315 * This doesn't exist in the ARC. Destroy.
4318 4316 * arc_hdr_destroy() will call list_remove()
4319 4317 * and decrement arcstat_l2_size.
4320 4318 */
4321 4319 arc_change_state(arc_anon, ab, hash_lock);
4322 4320 arc_hdr_destroy(ab);
4323 4321 } else {
4324 4322 /*
4325 4323 * Invalidate issued or about to be issued
4326 4324 * reads, since we may be about to write
4327 4325 * over this location.
4328 4326 */
4329 4327 if (HDR_L2_READING(ab)) {
4330 4328 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4331 4329 ab->b_flags |= ARC_L2_EVICTED;
4332 4330 }
4333 4331
4334 4332 /*
4335 4333 * Tell ARC this no longer exists in L2ARC.
4336 4334 */
4337 4335 if (ab->b_l2hdr != NULL) {
4338 4336 abl2 = ab->b_l2hdr;
4339 4337 ab->b_l2hdr = NULL;
4340 4338 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4341 4339 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4342 4340 }
4343 4341 list_remove(buflist, ab);
4344 4342
4345 4343 /*
4346 4344 * This may have been leftover after a
4347 4345 * failed write.
4348 4346 */
4349 4347 ab->b_flags &= ~ARC_L2_WRITING;
4350 4348 }
4351 4349 mutex_exit(hash_lock);
4352 4350 }
4353 4351 mutex_exit(&l2arc_buflist_mtx);
4354 4352
4355 4353 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4356 4354 dev->l2ad_evict = taddr;
4357 4355 }
4358 4356
4359 4357 /*
4360 4358 * Find and write ARC buffers to the L2ARC device.
4361 4359 *
4362 4360 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4363 4361 * for reading until they have completed writing.
4364 4362 */
4365 4363 static uint64_t
4366 4364 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4367 4365 {
4368 4366 arc_buf_hdr_t *ab, *ab_prev, *head;
4369 4367 l2arc_buf_hdr_t *hdrl2;
4370 4368 list_t *list;
4371 4369 uint64_t passed_sz, write_sz, buf_sz, headroom;
4372 4370 void *buf_data;
4373 4371 kmutex_t *hash_lock, *list_lock;
4374 4372 boolean_t have_lock, full;
4375 4373 l2arc_write_callback_t *cb;
4376 4374 zio_t *pio, *wzio;
4377 4375 uint64_t guid = spa_load_guid(spa);
4378 4376
4379 4377 ASSERT(dev->l2ad_vdev != NULL);
4380 4378
4381 4379 pio = NULL;
4382 4380 write_sz = 0;
4383 4381 full = B_FALSE;
4384 4382 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4385 4383 head->b_flags |= ARC_L2_WRITE_HEAD;
4386 4384
4387 4385 /*
4388 4386 * Copy buffers for L2ARC writing.
4389 4387 */
4390 4388 mutex_enter(&l2arc_buflist_mtx);
4391 4389 for (int try = 0; try <= 3; try++) {
4392 4390 list = l2arc_list_locked(try, &list_lock);
4393 4391 passed_sz = 0;
4394 4392
4395 4393 /*
4396 4394 * L2ARC fast warmup.
4397 4395 *
4398 4396 * Until the ARC is warm and starts to evict, read from the
4399 4397 * head of the ARC lists rather than the tail.
4400 4398 */
4401 4399 headroom = target_sz * l2arc_headroom;
4402 4400 if (arc_warm == B_FALSE)
4403 4401 ab = list_head(list);
4404 4402 else
4405 4403 ab = list_tail(list);
4406 4404
4407 4405 for (; ab; ab = ab_prev) {
4408 4406 if (arc_warm == B_FALSE)
4409 4407 ab_prev = list_next(list, ab);
4410 4408 else
4411 4409 ab_prev = list_prev(list, ab);
4412 4410
4413 4411 hash_lock = HDR_LOCK(ab);
4414 4412 have_lock = MUTEX_HELD(hash_lock);
4415 4413 if (!have_lock && !mutex_tryenter(hash_lock)) {
4416 4414 /*
4417 4415 * Skip this buffer rather than waiting.
4418 4416 */
4419 4417 continue;
4420 4418 }
4421 4419
4422 4420 passed_sz += ab->b_size;
4423 4421 if (passed_sz > headroom) {
4424 4422 /*
4425 4423 * Searched too far.
4426 4424 */
4427 4425 mutex_exit(hash_lock);
4428 4426 break;
4429 4427 }
4430 4428
4431 4429 if (!l2arc_write_eligible(guid, ab)) {
4432 4430 mutex_exit(hash_lock);
4433 4431 continue;
4434 4432 }
4435 4433
4436 4434 if ((write_sz + ab->b_size) > target_sz) {
4437 4435 full = B_TRUE;
4438 4436 mutex_exit(hash_lock);
4439 4437 break;
4440 4438 }
4441 4439
4442 4440 if (pio == NULL) {
4443 4441 /*
4444 4442 * Insert a dummy header on the buflist so
4445 4443 * l2arc_write_done() can find where the
4446 4444 * write buffers begin without searching.
4447 4445 */
4448 4446 list_insert_head(dev->l2ad_buflist, head);
4449 4447
4450 4448 cb = kmem_alloc(
4451 4449 sizeof (l2arc_write_callback_t), KM_SLEEP);
4452 4450 cb->l2wcb_dev = dev;
4453 4451 cb->l2wcb_head = head;
4454 4452 pio = zio_root(spa, l2arc_write_done, cb,
4455 4453 ZIO_FLAG_CANFAIL);
4456 4454 }
4457 4455
4458 4456 /*
4459 4457 * Create and add a new L2ARC header.
4460 4458 */
4461 4459 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4462 4460 hdrl2->b_dev = dev;
4463 4461 hdrl2->b_daddr = dev->l2ad_hand;
4464 4462
4465 4463 ab->b_flags |= ARC_L2_WRITING;
4466 4464 ab->b_l2hdr = hdrl2;
4467 4465 list_insert_head(dev->l2ad_buflist, ab);
4468 4466 buf_data = ab->b_buf->b_data;
4469 4467 buf_sz = ab->b_size;
4470 4468
4471 4469 /*
4472 4470 * Compute and store the buffer cksum before
4473 4471 * writing. On debug the cksum is verified first.
4474 4472 */
4475 4473 arc_cksum_verify(ab->b_buf);
4476 4474 arc_cksum_compute(ab->b_buf, B_TRUE);
4477 4475
4478 4476 mutex_exit(hash_lock);
4479 4477
4480 4478 wzio = zio_write_phys(pio, dev->l2ad_vdev,
4481 4479 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4482 4480 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4483 4481 ZIO_FLAG_CANFAIL, B_FALSE);
4484 4482
4485 4483 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4486 4484 zio_t *, wzio);
4487 4485 (void) zio_nowait(wzio);
4488 4486
4489 4487 /*
4490 4488 * Keep the clock hand suitably device-aligned.
4491 4489 */
4492 4490 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4493 4491
4494 4492 write_sz += buf_sz;
4495 4493 dev->l2ad_hand += buf_sz;
4496 4494 }
4497 4495
4498 4496 mutex_exit(list_lock);
4499 4497
4500 4498 if (full == B_TRUE)
4501 4499 break;
4502 4500 }
4503 4501 mutex_exit(&l2arc_buflist_mtx);
4504 4502
4505 4503 if (pio == NULL) {
4506 4504 ASSERT0(write_sz);
4507 4505 kmem_cache_free(hdr_cache, head);
4508 4506 return (0);
4509 4507 }
4510 4508
4511 4509 ASSERT3U(write_sz, <=, target_sz);
4512 4510 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4513 4511 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4514 4512 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4515 4513 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
4516 4514
4517 4515 /*
4518 4516 * Bump device hand to the device start if it is approaching the end.
4519 4517 * l2arc_evict() will already have evicted ahead for this case.
4520 4518 */
4521 4519 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4522 4520 vdev_space_update(dev->l2ad_vdev,
4523 4521 dev->l2ad_end - dev->l2ad_hand, 0, 0);
4524 4522 dev->l2ad_hand = dev->l2ad_start;
4525 4523 dev->l2ad_evict = dev->l2ad_start;
4526 4524 dev->l2ad_first = B_FALSE;
4527 4525 }
4528 4526
4529 4527 dev->l2ad_writing = B_TRUE;
4530 4528 (void) zio_wait(pio);
4531 4529 dev->l2ad_writing = B_FALSE;
4532 4530
4533 4531 return (write_sz);
4534 4532 }
4535 4533
4536 4534 /*
4537 4535 * This thread feeds the L2ARC at regular intervals. This is the beating
4538 4536 * heart of the L2ARC.
4539 4537 */
4540 4538 static void
4541 4539 l2arc_feed_thread(void)
4542 4540 {
4543 4541 callb_cpr_t cpr;
4544 4542 l2arc_dev_t *dev;
4545 4543 spa_t *spa;
4546 4544 uint64_t size, wrote;
4547 4545 clock_t begin, next = ddi_get_lbolt();
4548 4546
4549 4547 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4550 4548
4551 4549 mutex_enter(&l2arc_feed_thr_lock);
4552 4550
4553 4551 while (l2arc_thread_exit == 0) {
4554 4552 CALLB_CPR_SAFE_BEGIN(&cpr);
4555 4553 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4556 4554 next);
4557 4555 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4558 4556 next = ddi_get_lbolt() + hz;
4559 4557
4560 4558 /*
4561 4559 * Quick check for L2ARC devices.
4562 4560 */
4563 4561 mutex_enter(&l2arc_dev_mtx);
4564 4562 if (l2arc_ndev == 0) {
4565 4563 mutex_exit(&l2arc_dev_mtx);
4566 4564 continue;
4567 4565 }
4568 4566 mutex_exit(&l2arc_dev_mtx);
4569 4567 begin = ddi_get_lbolt();
4570 4568
4571 4569 /*
4572 4570 * This selects the next l2arc device to write to, and in
4573 4571 * doing so the next spa to feed from: dev->l2ad_spa. This
4574 4572 * will return NULL if there are now no l2arc devices or if
4575 4573 * they are all faulted.
4576 4574 *
4577 4575 * If a device is returned, its spa's config lock is also
4578 4576 * held to prevent device removal. l2arc_dev_get_next()
4579 4577 * will grab and release l2arc_dev_mtx.
4580 4578 */
4581 4579 if ((dev = l2arc_dev_get_next()) == NULL)
4582 4580 continue;
4583 4581
4584 4582 spa = dev->l2ad_spa;
4585 4583 ASSERT(spa != NULL);
4586 4584
4587 4585 /*
4588 4586 * If the pool is read-only then force the feed thread to
4589 4587 * sleep a little longer.
4590 4588 */
4591 4589 if (!spa_writeable(spa)) {
4592 4590 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
4593 4591 spa_config_exit(spa, SCL_L2ARC, dev);
4594 4592 continue;
4595 4593 }
4596 4594
4597 4595 /*
4598 4596 * Avoid contributing to memory pressure.
4599 4597 */
4600 4598 if (arc_reclaim_needed()) {
4601 4599 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4602 4600 spa_config_exit(spa, SCL_L2ARC, dev);
4603 4601 continue;
4604 4602 }
4605 4603
4606 4604 ARCSTAT_BUMP(arcstat_l2_feeds);
4607 4605
4608 4606 size = l2arc_write_size(dev);
4609 4607
4610 4608 /*
4611 4609 * Evict L2ARC buffers that will be overwritten.
4612 4610 */
4613 4611 l2arc_evict(dev, size, B_FALSE);
4614 4612
4615 4613 /*
4616 4614 * Write ARC buffers.
4617 4615 */
4618 4616 wrote = l2arc_write_buffers(spa, dev, size);
4619 4617
4620 4618 /*
4621 4619 * Calculate interval between writes.
4622 4620 */
4623 4621 next = l2arc_write_interval(begin, size, wrote);
4624 4622 spa_config_exit(spa, SCL_L2ARC, dev);
4625 4623 }
4626 4624
4627 4625 l2arc_thread_exit = 0;
4628 4626 cv_broadcast(&l2arc_feed_thr_cv);
4629 4627 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
4630 4628 thread_exit();
4631 4629 }
4632 4630
4633 4631 boolean_t
4634 4632 l2arc_vdev_present(vdev_t *vd)
4635 4633 {
4636 4634 l2arc_dev_t *dev;
4637 4635
4638 4636 mutex_enter(&l2arc_dev_mtx);
4639 4637 for (dev = list_head(l2arc_dev_list); dev != NULL;
4640 4638 dev = list_next(l2arc_dev_list, dev)) {
4641 4639 if (dev->l2ad_vdev == vd)
4642 4640 break;
4643 4641 }
4644 4642 mutex_exit(&l2arc_dev_mtx);
4645 4643
4646 4644 return (dev != NULL);
4647 4645 }
4648 4646
4649 4647 /*
4650 4648 * Add a vdev for use by the L2ARC. By this point the spa has already
4651 4649 * validated the vdev and opened it.
4652 4650 */
4653 4651 void
4654 4652 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
4655 4653 {
4656 4654 l2arc_dev_t *adddev;
4657 4655
4658 4656 ASSERT(!l2arc_vdev_present(vd));
4659 4657
4660 4658 /*
4661 4659 * Create a new l2arc device entry.
4662 4660 */
4663 4661 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4664 4662 adddev->l2ad_spa = spa;
4665 4663 adddev->l2ad_vdev = vd;
4666 4664 adddev->l2ad_write = l2arc_write_max;
4667 4665 adddev->l2ad_boost = l2arc_write_boost;
4668 4666 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
4669 4667 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
4670 4668 adddev->l2ad_hand = adddev->l2ad_start;
4671 4669 adddev->l2ad_evict = adddev->l2ad_start;
4672 4670 adddev->l2ad_first = B_TRUE;
4673 4671 adddev->l2ad_writing = B_FALSE;
4674 4672 ASSERT3U(adddev->l2ad_write, >, 0);
4675 4673
4676 4674 /*
4677 4675 * This is a list of all ARC buffers that are still valid on the
4678 4676 * device.
4679 4677 */
4680 4678 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4681 4679 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4682 4680 offsetof(arc_buf_hdr_t, b_l2node));
4683 4681
4684 4682 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
4685 4683
4686 4684 /*
4687 4685 * Add device to global list
4688 4686 */
4689 4687 mutex_enter(&l2arc_dev_mtx);
4690 4688 list_insert_head(l2arc_dev_list, adddev);
4691 4689 atomic_inc_64(&l2arc_ndev);
4692 4690 mutex_exit(&l2arc_dev_mtx);
4693 4691 }
4694 4692
4695 4693 /*
4696 4694 * Remove a vdev from the L2ARC.
4697 4695 */
4698 4696 void
4699 4697 l2arc_remove_vdev(vdev_t *vd)
4700 4698 {
4701 4699 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4702 4700
4703 4701 /*
4704 4702 * Find the device by vdev
4705 4703 */
4706 4704 mutex_enter(&l2arc_dev_mtx);
4707 4705 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4708 4706 nextdev = list_next(l2arc_dev_list, dev);
4709 4707 if (vd == dev->l2ad_vdev) {
4710 4708 remdev = dev;
4711 4709 break;
4712 4710 }
4713 4711 }
4714 4712 ASSERT(remdev != NULL);
4715 4713
4716 4714 /*
4717 4715 * Remove device from global list
4718 4716 */
4719 4717 list_remove(l2arc_dev_list, remdev);
4720 4718 l2arc_dev_last = NULL; /* may have been invalidated */
4721 4719 atomic_dec_64(&l2arc_ndev);
4722 4720 mutex_exit(&l2arc_dev_mtx);
4723 4721
4724 4722 /*
4725 4723 * Clear all buflists and ARC references. L2ARC device flush.
4726 4724 */
4727 4725 l2arc_evict(remdev, 0, B_TRUE);
4728 4726 list_destroy(remdev->l2ad_buflist);
4729 4727 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4730 4728 kmem_free(remdev, sizeof (l2arc_dev_t));
4731 4729 }
4732 4730
4733 4731 void
4734 4732 l2arc_init(void)
4735 4733 {
4736 4734 l2arc_thread_exit = 0;
4737 4735 l2arc_ndev = 0;
4738 4736 l2arc_writes_sent = 0;
4739 4737 l2arc_writes_done = 0;
4740 4738
4741 4739 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4742 4740 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4743 4741 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4744 4742 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4745 4743 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4746 4744
4747 4745 l2arc_dev_list = &L2ARC_dev_list;
4748 4746 l2arc_free_on_write = &L2ARC_free_on_write;
4749 4747 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4750 4748 offsetof(l2arc_dev_t, l2ad_node));
4751 4749 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4752 4750 offsetof(l2arc_data_free_t, l2df_list_node));
4753 4751 }
4754 4752
4755 4753 void
4756 4754 l2arc_fini(void)
4757 4755 {
4758 4756 /*
4759 4757 * This is called from dmu_fini(), which is called from spa_fini();
4760 4758 * Because of this, we can assume that all l2arc devices have
4761 4759 * already been removed when the pools themselves were removed.
4762 4760 */
4763 4761
4764 4762 l2arc_do_free_on_write();
4765 4763
4766 4764 mutex_destroy(&l2arc_feed_thr_lock);
4767 4765 cv_destroy(&l2arc_feed_thr_cv);
4768 4766 mutex_destroy(&l2arc_dev_mtx);
4769 4767 mutex_destroy(&l2arc_buflist_mtx);
4770 4768 mutex_destroy(&l2arc_free_on_write_mtx);
4771 4769
4772 4770 list_destroy(l2arc_dev_list);
4773 4771 list_destroy(l2arc_free_on_write);
4774 4772 }
4775 4773
4776 4774 void
4777 4775 l2arc_start(void)
4778 4776 {
4779 4777 if (!(spa_mode_global & FWRITE))
4780 4778 return;
4781 4779
4782 4780 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4783 4781 TS_RUN, minclsyspri);
4784 4782 }
4785 4783
4786 4784 void
4787 4785 l2arc_stop(void)
4788 4786 {
4789 4787 if (!(spa_mode_global & FWRITE))
4790 4788 return;
4791 4789
4792 4790 mutex_enter(&l2arc_feed_thr_lock);
4793 4791 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
4794 4792 l2arc_thread_exit = 1;
4795 4793 while (l2arc_thread_exit != 0)
4796 4794 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4797 4795 mutex_exit(&l2arc_feed_thr_lock);
4798 4796 }